Diffstat (limited to 'arch/x86')
25 files changed, 535 insertions, 591 deletions
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index fa42f895fdde..169c2feda14a 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -106,9 +106,13 @@ define cmd_check_data_rel done endef +# We need to run two commands under "if_changed", so merge them into a +# single invocation. +quiet_cmd_check-and-link-vmlinux = LD $@ + cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) + $(obj)/vmlinux: $(vmlinux-objs-y) FORCE - $(call if_changed,check_data_rel) - $(call if_changed,ld) + $(call if_changed,check-and-link-vmlinux) OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index e98522ea6f09..1458b1700fc7 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c) \ \ table = (typeof(table))sys_table; \ \ - c->runtime_services = table->runtime; \ - c->boot_services = table->boottime; \ - c->text_output = table->con_out; \ + c->runtime_services = table->runtime; \ + c->boot_services = table->boottime; \ + c->text_output = table->con_out; \ } BOOT_SERVICES(32); BOOT_SERVICES(64); -static inline efi_status_t __open_volume32(void *__image, void **__fh) -{ - efi_file_io_interface_t *io; - efi_loaded_image_32_t *image = __image; - efi_file_handle_32_t *fh; - efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; - efi_status_t status; - void *handle = (void *)(unsigned long)image->device_handle; - unsigned long func; - - status = efi_call_early(handle_protocol, handle, - &fs_proto, (void **)&io); - if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to handle fs_proto\n"); - return status; - } - - func = (unsigned long)io->open_volume; - status = efi_early->call(func, io, &fh); - if (status != EFI_SUCCESS) - efi_printk(sys_table, "Failed to open volume\n"); - - *__fh = fh; - return status; -} - -static inline efi_status_t __open_volume64(void *__image, void **__fh) -{ - efi_file_io_interface_t *io; - efi_loaded_image_64_t *image = __image; - efi_file_handle_64_t *fh; - efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; - efi_status_t status; - void *handle = (void *)(unsigned long)image->device_handle; - unsigned long func; - - status = efi_call_early(handle_protocol, handle, - &fs_proto, (void **)&io); - if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to handle fs_proto\n"); - return status; - } - - func = (unsigned long)io->open_volume; - status = efi_early->call(func, io, &fh); - if (status != EFI_SUCCESS) - efi_printk(sys_table, "Failed to open volume\n"); - - *__fh = fh; - return status; -} - -efi_status_t -efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh) -{ - if (efi_early->is64) - return __open_volume64(__image, __fh); - - return __open_volume32(__image, __fh); -} - void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) { efi_call_proto(efi_simple_text_output_protocol, output_string, @@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) } static efi_status_t -__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) +preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) { struct pci_setup_rom *rom = NULL; efi_status_t status; @@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, 
&rom); if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc mem for rom\n"); + efi_printk(sys_table, "Failed to allocate memory for 'rom'\n"); return status; } memset(rom, 0, sizeof(*rom)); - rom->data.type = SETUP_PCI; - rom->data.len = size - sizeof(struct setup_data); - rom->data.next = 0; - rom->pcilen = pci->romsize; + rom->data.type = SETUP_PCI; + rom->data.len = size - sizeof(struct setup_data); + rom->data.next = 0; + rom->pcilen = pci->romsize; *__rom = rom; status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, @@ -179,96 +118,6 @@ free_struct: return status; } -static void -setup_efi_pci32(struct boot_params *params, void **pci_handle, - unsigned long size) -{ - efi_pci_io_protocol_t *pci = NULL; - efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; - u32 *handles = (u32 *)(unsigned long)pci_handle; - efi_status_t status; - unsigned long nr_pci; - struct setup_data *data; - int i; - - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; - - while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; - - nr_pci = size / sizeof(u32); - for (i = 0; i < nr_pci; i++) { - struct pci_setup_rom *rom = NULL; - u32 h = handles[i]; - - status = efi_call_early(handle_protocol, h, - &pci_proto, (void **)&pci); - - if (status != EFI_SUCCESS) - continue; - - if (!pci) - continue; - - status = __setup_efi_pci(pci, &rom); - if (status != EFI_SUCCESS) - continue; - - if (data) - data->next = (unsigned long)rom; - else - params->hdr.setup_data = (unsigned long)rom; - - data = (struct setup_data *)rom; - - } -} - -static void -setup_efi_pci64(struct boot_params *params, void **pci_handle, - unsigned long size) -{ - efi_pci_io_protocol_t *pci = NULL; - efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; - u64 *handles = (u64 *)(unsigned long)pci_handle; - efi_status_t status; - unsigned long nr_pci; - struct setup_data *data; - int i; - - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; - - while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; - - nr_pci = size / sizeof(u64); - for (i = 0; i < nr_pci; i++) { - struct pci_setup_rom *rom = NULL; - u64 h = handles[i]; - - status = efi_call_early(handle_protocol, h, - &pci_proto, (void **)&pci); - - if (status != EFI_SUCCESS) - continue; - - if (!pci) - continue; - - status = __setup_efi_pci(pci, &rom); - if (status != EFI_SUCCESS) - continue; - - if (data) - data->next = (unsigned long)rom; - else - params->hdr.setup_data = (unsigned long)rom; - - data = (struct setup_data *)rom; - - } -} - /* * There's no way to return an informative status from this function, * because any analysis (and printing of error messages) needs to be @@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params) void **pci_handle = NULL; efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; unsigned long size = 0; + unsigned long nr_pci; + struct setup_data *data; + int i; status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, @@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params) size, (void **)&pci_handle); if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc mem for pci_handle\n"); + efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n"); return; } @@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params) if (status != EFI_SUCCESS) goto free_handle; - if (efi_early->is64) - setup_efi_pci64(params, pci_handle, size); - else - setup_efi_pci32(params, pci_handle, size); + data = (struct 
setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32)); + for (i = 0; i < nr_pci; i++) { + efi_pci_io_protocol_t *pci = NULL; + struct pci_setup_rom *rom; + + status = efi_call_early(handle_protocol, + efi_is_64bit() ? ((u64 *)pci_handle)[i] + : ((u32 *)pci_handle)[i], + &pci_proto, (void **)&pci); + if (status != EFI_SUCCESS || !pci) + continue; + + status = preserve_pci_rom_image(pci, &rom); + if (status != EFI_SUCCESS) + continue; + + if (data) + data->next = (unsigned long)rom; + else + params->hdr.setup_data = (unsigned long)rom; + + data = (struct setup_data *)rom; + } free_handle: efi_call_early(free_pool, pci_handle); @@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size + sizeof(struct setup_data), &new); if (status != EFI_SUCCESS) { - efi_printk(sys_table, - "Failed to alloc mem for properties\n"); + efi_printk(sys_table, "Failed to allocate memory for 'properties'\n"); return; } @@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) new->next = 0; data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data; - if (!data) + if (!data) { boot_params->hdr.setup_data = (unsigned long)new; - else { + } else { while (data->next) data = (struct setup_data *)(unsigned long)data->next; data->next = (unsigned long)new; @@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params) } } +/* + * See if we have Universal Graphics Adapter (UGA) protocol + */ static efi_status_t -setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) +setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size) { - struct efi_uga_draw_protocol *uga = NULL, *first_uga; - efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; + efi_status_t status; + u32 width, height; + void **uga_handle = NULL; + efi_uga_draw_protocol_t *uga = NULL, *first_uga; unsigned long nr_ugas; - u32 *handles = (u32 *)uga_handle; - efi_status_t status = EFI_INVALID_PARAMETER; int i; - first_uga = NULL; - nr_ugas = size / sizeof(u32); - for (i = 0; i < nr_ugas; i++) { - efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; - u32 w, h, depth, refresh; - void *pciio; - u32 handle = handles[i]; - - status = efi_call_early(handle_protocol, handle, - &uga_proto, (void **)&uga); - if (status != EFI_SUCCESS) - continue; - - efi_call_early(handle_protocol, handle, &pciio_proto, &pciio); - - status = efi_early->call((unsigned long)uga->get_mode, uga, - &w, &h, &depth, &refresh); - if (status == EFI_SUCCESS && (!first_uga || pciio)) { - *width = w; - *height = h; - - /* - * Once we've found a UGA supporting PCIIO, - * don't bother looking any further. 
- */ - if (pciio) - break; - - first_uga = uga; - } - } + status = efi_call_early(allocate_pool, EFI_LOADER_DATA, + size, (void **)&uga_handle); + if (status != EFI_SUCCESS) + return status; - return status; -} + status = efi_call_early(locate_handle, + EFI_LOCATE_BY_PROTOCOL, + uga_proto, NULL, &size, uga_handle); + if (status != EFI_SUCCESS) + goto free_handle; -static efi_status_t -setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height) -{ - struct efi_uga_draw_protocol *uga = NULL, *first_uga; - efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; - unsigned long nr_ugas; - u64 *handles = (u64 *)uga_handle; - efi_status_t status = EFI_INVALID_PARAMETER; - int i; + height = 0; + width = 0; first_uga = NULL; - nr_ugas = size / sizeof(u64); + nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32)); for (i = 0; i < nr_ugas; i++) { efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; u32 w, h, depth, refresh; void *pciio; - u64 handle = handles[i]; + unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i] + : ((u32 *)uga_handle)[i]; status = efi_call_early(handle_protocol, handle, - &uga_proto, (void **)&uga); + uga_proto, (void **)&uga); if (status != EFI_SUCCESS) continue; + pciio = NULL; efi_call_early(handle_protocol, handle, &pciio_proto, &pciio); - status = efi_early->call((unsigned long)uga->get_mode, uga, - &w, &h, &depth, &refresh); + status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga, + &w, &h, &depth, &refresh); if (status == EFI_SUCCESS && (!first_uga || pciio)) { - *width = w; - *height = h; + width = w; + height = h; /* * Once we've found a UGA supporting PCIIO, @@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height) } } - return status; -} - -/* - * See if we have Universal Graphics Adapter (UGA) protocol - */ -static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto, - unsigned long size) -{ - efi_status_t status; - u32 width, height; - void **uga_handle = NULL; - - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - size, (void **)&uga_handle); - if (status != EFI_SUCCESS) - return status; - - status = efi_call_early(locate_handle, - EFI_LOCATE_BY_PROTOCOL, - uga_proto, NULL, &size, uga_handle); - if (status != EFI_SUCCESS) - goto free_handle; - - height = 0; - width = 0; - - if (efi_early->is64) - status = setup_uga64(uga_handle, size, &width, &height); - else - status = setup_uga32(uga_handle, size, &width, &height); - if (!width && !height) goto free_handle; /* EFI framebuffer */ - si->orig_video_isVGA = VIDEO_TYPE_EFI; + si->orig_video_isVGA = VIDEO_TYPE_EFI; - si->lfb_depth = 32; - si->lfb_width = width; - si->lfb_height = height; + si->lfb_depth = 32; + si->lfb_width = width; + si->lfb_height = height; - si->red_size = 8; - si->red_pos = 16; - si->green_size = 8; - si->green_pos = 8; - si->blue_size = 8; - si->blue_pos = 0; - si->rsvd_size = 8; - si->rsvd_pos = 24; + si->red_size = 8; + si->red_pos = 16; + si->green_size = 8; + si->green_pos = 8; + si->blue_size = 8; + si->blue_pos = 0; + si->rsvd_size = 8; + si->rsvd_pos = 24; free_handle: efi_call_early(free_pool, uga_handle); + return status; } @@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c) if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) return NULL; - if (efi_early->is64) + if (efi_is_64bit()) setup_boot_services64(efi_early); else setup_boot_services32(efi_early); @@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c) status = 
efi_low_alloc(sys_table, 0x4000, 1, (unsigned long *)&boot_params); if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc lowmem for boot params\n"); + efi_printk(sys_table, "Failed to allocate lowmem for boot params\n"); return NULL; } @@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c) * Fill out some of the header fields ourselves because the * EFI firmware loader doesn't load the first sector. */ - hdr->root_flags = 1; - hdr->vid_mode = 0xffff; - hdr->boot_flag = 0xAA55; + hdr->root_flags = 1; + hdr->vid_mode = 0xffff; + hdr->boot_flag = 0xAA55; hdr->type_of_loader = 0x21; @@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c) cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size); if (!cmdline_ptr) goto fail; + hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; /* Fill in upper bits of command line address, NOP on 32 bit */ boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; @@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c) boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32; return boot_params; + fail2: efi_free(sys_table, options_size, hdr->cmd_line_ptr); fail: efi_free(sys_table, 0x4000, (unsigned long)boot_params); + return NULL; } @@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params, unsigned long size; e820ext->type = SETUP_E820_EXT; - e820ext->len = nr_entries * sizeof(struct boot_e820_entry); + e820ext->len = nr_entries * sizeof(struct boot_e820_entry); e820ext->next = 0; data = (struct setup_data *)(unsigned long)params->hdr.setup_data; @@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params, params->hdr.setup_data = (unsigned long)e820ext; } -static efi_status_t setup_e820(struct boot_params *params, - struct setup_data *e820ext, u32 e820ext_size) +static efi_status_t +setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size) { struct boot_e820_entry *entry = params->e820_table; struct efi_info *efi = ¶ms->efi_info; @@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext, } struct exit_boot_struct { - struct boot_params *boot_params; - struct efi_info *efi; - struct setup_data *e820ext; - __u32 e820ext_size; - bool is64; + struct boot_params *boot_params; + struct efi_info *efi; + struct setup_data *e820ext; + __u32 e820ext_size; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, @@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, first = false; } - signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE; + signature = efi_is_64bit() ? 
EFI64_LOADER_SIGNATURE + : EFI32_LOADER_SIGNATURE; memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); - p->efi->efi_systab = (unsigned long)sys_table_arg; - p->efi->efi_memdesc_size = *map->desc_size; - p->efi->efi_memdesc_version = *map->desc_ver; - p->efi->efi_memmap = (unsigned long)*map->map; - p->efi->efi_memmap_size = *map->map_size; + p->efi->efi_systab = (unsigned long)sys_table_arg; + p->efi->efi_memdesc_size = *map->desc_size; + p->efi->efi_memdesc_version = *map->desc_ver; + p->efi->efi_memmap = (unsigned long)*map->map; + p->efi->efi_memmap_size = *map->map_size; #ifdef CONFIG_X86_64 - p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32; - p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; + p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32; + p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; #endif return EFI_SUCCESS; } -static efi_status_t exit_boot(struct boot_params *boot_params, - void *handle, bool is64) +static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) { unsigned long map_sz, key, desc_size, buff_size; efi_memory_desc_t *mem_map; @@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params, struct efi_boot_memmap map; struct exit_boot_struct priv; - map.map = &mem_map; - map.map_size = &map_sz; - map.desc_size = &desc_size; - map.desc_ver = &desc_version; - map.key_ptr = &key; - map.buff_size = &buff_size; - priv.boot_params = boot_params; - priv.efi = &boot_params->efi_info; - priv.e820ext = NULL; - priv.e820ext_size = 0; - priv.is64 = is64; + map.map = &mem_map; + map.map_size = &map_sz; + map.desc_size = &desc_size; + map.desc_ver = &desc_version; + map.key_ptr = &key; + map.buff_size = &buff_size; + priv.boot_params = boot_params; + priv.efi = &boot_params->efi_info; + priv.e820ext = NULL; + priv.e820ext_size = 0; /* Might as well exit boot services now */ status = efi_exit_boot_services(sys_table, handle, &map, &priv, @@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params, if (status != EFI_SUCCESS) return status; - e820ext = priv.e820ext; - e820ext_size = priv.e820ext_size; + e820ext = priv.e820ext; + e820ext_size = priv.e820ext_size; + /* Historic? */ - boot_params->alt_mem_k = 32 * 1024; + boot_params->alt_mem_k = 32 * 1024; status = setup_e820(boot_params, e820ext, e820ext_size); if (status != EFI_SUCCESS) @@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, * On success we return a pointer to a boot_params structure, and NULL * on failure. 
*/ -struct boot_params *efi_main(struct efi_config *c, - struct boot_params *boot_params) +struct boot_params * +efi_main(struct efi_config *c, struct boot_params *boot_params) { struct desc_ptr *gdt = NULL; efi_loaded_image_t *image; @@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c, struct desc_struct *desc; void *handle; efi_system_table_t *_table; - bool is64; efi_early = c; _table = (efi_system_table_t *)(unsigned long)efi_early->table; handle = (void *)(unsigned long)efi_early->image_handle; - is64 = efi_early->is64; sys_table = _table; @@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c, if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) goto fail; - if (is64) + if (efi_is_64bit()) setup_boot_services64(efi_early); else setup_boot_services32(efi_early); @@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c, status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*gdt), (void **)&gdt); if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc mem for gdt structure\n"); + efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n"); goto fail; } @@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c, status = efi_low_alloc(sys_table, gdt->size, 8, (unsigned long *)&gdt->address); if (status != EFI_SUCCESS) { - efi_printk(sys_table, "Failed to alloc mem for gdt\n"); + efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n"); goto fail; } @@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c, hdr->code32_start = bzimage_addr; } - status = exit_boot(boot_params, handle, is64); + status = exit_boot(boot_params, handle); if (status != EFI_SUCCESS) { efi_printk(sys_table, "exit_boot() failed!\n"); goto fail; @@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c, if (IS_ENABLED(CONFIG_X86_64)) { /* __KERNEL32_CS */ - desc->limit0 = 0xffff; - desc->base0 = 0x0000; - desc->base1 = 0x0000; - desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; - desc->s = DESC_TYPE_CODE_DATA; - desc->dpl = 0; - desc->p = 1; - desc->limit1 = 0xf; - desc->avl = 0; - desc->l = 0; - desc->d = SEG_OP_SIZE_32BIT; - desc->g = SEG_GRANULARITY_4KB; - desc->base2 = 0x00; + desc->limit0 = 0xffff; + desc->base0 = 0x0000; + desc->base1 = 0x0000; + desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; + desc->s = DESC_TYPE_CODE_DATA; + desc->dpl = 0; + desc->p = 1; + desc->limit1 = 0xf; + desc->avl = 0; + desc->l = 0; + desc->d = SEG_OP_SIZE_32BIT; + desc->g = SEG_GRANULARITY_4KB; + desc->base2 = 0x00; + desc++; } else { /* Second entry is unused on 32-bit */ @@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c, } /* __KERNEL_CS */ - desc->limit0 = 0xffff; - desc->base0 = 0x0000; - desc->base1 = 0x0000; - desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; - desc->s = DESC_TYPE_CODE_DATA; - desc->dpl = 0; - desc->p = 1; - desc->limit1 = 0xf; - desc->avl = 0; + desc->limit0 = 0xffff; + desc->base0 = 0x0000; + desc->base1 = 0x0000; + desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; + desc->s = DESC_TYPE_CODE_DATA; + desc->dpl = 0; + desc->p = 1; + desc->limit1 = 0xf; + desc->avl = 0; + if (IS_ENABLED(CONFIG_X86_64)) { desc->l = 1; desc->d = 0; @@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c, desc->l = 0; desc->d = SEG_OP_SIZE_32BIT; } - desc->g = SEG_GRANULARITY_4KB; - desc->base2 = 0x00; + desc->g = SEG_GRANULARITY_4KB; + desc->base2 = 0x00; desc++; /* __KERNEL_DS */ - desc->limit0 = 0xffff; - desc->base0 = 0x0000; - desc->base1 = 0x0000; - 
desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE; - desc->s = DESC_TYPE_CODE_DATA; - desc->dpl = 0; - desc->p = 1; - desc->limit1 = 0xf; - desc->avl = 0; - desc->l = 0; - desc->d = SEG_OP_SIZE_32BIT; - desc->g = SEG_GRANULARITY_4KB; - desc->base2 = 0x00; + desc->limit0 = 0xffff; + desc->base0 = 0x0000; + desc->base1 = 0x0000; + desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE; + desc->s = DESC_TYPE_CODE_DATA; + desc->dpl = 0; + desc->p = 1; + desc->limit1 = 0xf; + desc->avl = 0; + desc->l = 0; + desc->d = SEG_OP_SIZE_32BIT; + desc->g = SEG_GRANULARITY_4KB; + desc->base2 = 0x00; desc++; if (IS_ENABLED(CONFIG_X86_64)) { /* Task segment value */ - desc->limit0 = 0x0000; - desc->base0 = 0x0000; - desc->base1 = 0x0000; - desc->type = SEG_TYPE_TSS; - desc->s = 0; - desc->dpl = 0; - desc->p = 1; - desc->limit1 = 0x0; - desc->avl = 0; - desc->l = 0; - desc->d = 0; - desc->g = SEG_GRANULARITY_4KB; - desc->base2 = 0x00; + desc->limit0 = 0x0000; + desc->base0 = 0x0000; + desc->base1 = 0x0000; + desc->type = SEG_TYPE_TSS; + desc->s = 0; + desc->dpl = 0; + desc->p = 1; + desc->limit1 = 0x0; + desc->avl = 0; + desc->l = 0; + desc->d = 0; + desc->g = SEG_GRANULARITY_4KB; + desc->base2 = 0x00; desc++; } @@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c, return boot_params; fail: efi_printk(sys_table, "efi_main() failed!\n"); + return NULL; } diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index e799dc5c6448..8297387c4676 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h @@ -12,22 +12,22 @@ #define DESC_TYPE_CODE_DATA (1 << 0) -struct efi_uga_draw_protocol_32 { +typedef struct { u32 get_mode; u32 set_mode; u32 blt; -}; +} efi_uga_draw_protocol_32_t; -struct efi_uga_draw_protocol_64 { +typedef struct { u64 get_mode; u64 set_mode; u64 blt; -}; +} efi_uga_draw_protocol_64_t; -struct efi_uga_draw_protocol { +typedef struct { void *get_mode; void *set_mode; void *blt; -}; +} efi_uga_draw_protocol_t; #endif /* BOOT_COMPRESSED_EBOOT_H */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 8c5107545251..9e2157371491 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,3 +1,4 @@ +#include <asm/e820/types.h> #include <asm/processor.h> #include "pgtable.h" #include "../string.h" @@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data); extern struct boot_params *boot_params; int cmdline_find_option_bool(const char *option); +static unsigned long find_trampoline_placement(void) +{ + unsigned long bios_start, ebda_start; + unsigned long trampoline_start; + struct boot_e820_entry *entry; + int i; + + /* + * Find a suitable spot for the trampoline. + * This code is based on reserve_bios_regions(). + */ + + ebda_start = *(unsigned short *)0x40e << 4; + bios_start = *(unsigned short *)0x413 << 10; + + if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX) + bios_start = BIOS_START_MAX; + + if (ebda_start > BIOS_START_MIN && ebda_start < bios_start) + bios_start = ebda_start; + + bios_start = round_down(bios_start, PAGE_SIZE); + + /* Find the first usable memory region under bios_start. */ + for (i = boot_params->e820_entries - 1; i >= 0; i--) { + entry = &boot_params->e820_table[i]; + + /* Skip all entries above bios_start. */ + if (bios_start <= entry->addr) + continue; + + /* Skip non-RAM entries. */ + if (entry->type != E820_TYPE_RAM) + continue; + + /* Adjust bios_start to the end of the entry if needed. 
*/ + if (bios_start > entry->addr + entry->size) + bios_start = entry->addr + entry->size; + + /* Keep bios_start page-aligned. */ + bios_start = round_down(bios_start, PAGE_SIZE); + + /* Skip the entry if it's too small. */ + if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) + continue; + + break; + } + + /* Place the trampoline just below the end of low memory */ + return bios_start - TRAMPOLINE_32BIT_SIZE; +} + struct paging_config paging_prepare(void *rmode) { struct paging_config paging_config = {}; - unsigned long bios_start, ebda_start; /* Initialize boot_params. Required for cmdline_find_option_bool(). */ boot_params = rmode; @@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode) paging_config.l5_required = 1; } - /* - * Find a suitable spot for the trampoline. - * This code is based on reserve_bios_regions(). - */ - - ebda_start = *(unsigned short *)0x40e << 4; - bios_start = *(unsigned short *)0x413 << 10; - - if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX) - bios_start = BIOS_START_MAX; - - if (ebda_start > BIOS_START_MIN && ebda_start < bios_start) - bios_start = ebda_start; - - /* Place the trampoline just below the end of low memory, aligned to 4k */ - paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE; - paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE); + paging_config.trampoline_start = find_trampoline_placement(); trampoline_32bit = (unsigned long *)paging_config.trampoline_start; diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 5de7c0d46edf..acd11b3bf639 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = { } }; -static const struct x86_cpu_id aesni_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_AES), - X86_FEATURE_MATCH(X86_FEATURE_XMM2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); - static int __init crypto_aegis128_aesni_module_init(void) { - if (!x86_match_cpu(aesni_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_XMM2) || + !boot_cpu_has(X86_FEATURE_AES) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; return crypto_register_aeads(crypto_aegis128_aesni_alg, diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index 876e4866e633..2071c3d1ae07 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = { } }; -static const struct x86_cpu_id aesni_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_AES), - X86_FEATURE_MATCH(X86_FEATURE_XMM2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); - static int __init crypto_aegis128l_aesni_module_init(void) { - if (!x86_match_cpu(aesni_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_XMM2) || + !boot_cpu_has(X86_FEATURE_AES) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; return crypto_register_aeads(crypto_aegis128l_aesni_alg, diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index 2b5dd3af8f4d..b5f2a8fd5a71 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = { } }; -static const struct x86_cpu_id aesni_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_AES), - 
X86_FEATURE_MATCH(X86_FEATURE_XMM2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); - static int __init crypto_aegis256_aesni_module_init(void) { - if (!x86_match_cpu(aesni_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_XMM2) || + !boot_cpu_has(X86_FEATURE_AES) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; return crypto_register_aeads(crypto_aegis256_aesni_alg, diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c index f111f36d26dc..6634907d6ccd 100644 --- a/arch/x86/crypto/morus1280-avx2-glue.c +++ b/arch/x86/crypto/morus1280-avx2-glue.c @@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor, MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400); -static const struct x86_cpu_id avx2_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_AVX2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id); - static int __init crypto_morus1280_avx2_module_init(void) { - if (!x86_match_cpu(avx2_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_AVX2) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) return -ENODEV; return crypto_register_aeads(crypto_morus1280_avx2_algs, diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c index 839270aa713c..95cf857d2cbb 100644 --- a/arch/x86/crypto/morus1280-sse2-glue.c +++ b/arch/x86/crypto/morus1280-sse2-glue.c @@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor, MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); -static const struct x86_cpu_id sse2_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_XMM2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id); - static int __init crypto_morus1280_sse2_module_init(void) { - if (!x86_match_cpu(sse2_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_XMM2) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; return crypto_register_aeads(crypto_morus1280_sse2_algs, diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c index 26b47e2db8d2..615fb7bc9a32 100644 --- a/arch/x86/crypto/morus640-sse2-glue.c +++ b/arch/x86/crypto/morus640-sse2-glue.c @@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor, MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); -static const struct x86_cpu_id sse2_cpu_id[] = { - X86_FEATURE_MATCH(X86_FEATURE_XMM2), - {} -}; -MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id); - static int __init crypto_morus640_sse2_module_init(void) { - if (!x86_match_cpu(sse2_cpu_id)) + if (!boot_cpu_has(X86_FEATURE_XMM2) || + !boot_cpu_has(X86_FEATURE_OSXSAVE) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; return crypto_register_aeads(crypto_morus640_sse2_algs, diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 73a522d53b53..8ae7ffda8f98 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -981,7 +981,7 @@ ENTRY(\sym) call \do_sym - jmp error_exit /* %ebx: no swapgs flag */ + jmp error_exit .endif END(\sym) .endm @@ -1222,7 +1222,6 @@ END(paranoid_exit) /* * Save all registers in pt_regs, and switch GS if needed. - * Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) UNWIND_HINT_FUNC @@ -1269,7 +1268,6 @@ ENTRY(error_entry) * for these here too. 
*/ .Lerror_kernelspace: - incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) je .Lerror_bad_iret @@ -1303,28 +1301,20 @@ ENTRY(error_entry) /* * Pretend that the exception came from user mode: set up pt_regs - * as if we faulted immediately after IRET and clear EBX so that - * error_exit knows that we will be returning to user mode. + * as if we faulted immediately after IRET. */ mov %rsp, %rdi call fixup_bad_iret mov %rax, %rsp - decl %ebx jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) - -/* - * On entry, EBX is a "return to kernel mode" flag: - * 1: already in kernel mode, don't need SWAPGS - * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode - */ ENTRY(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - testl %ebx, %ebx - jnz retint_kernel + testb $3, CS(%rsp) + jz retint_kernel jmp retint_user END(error_exit) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 4b98101209a1..d50bb4dc0650 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) { struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); struct perf_event *event = pcpu->event; - struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event *hwc; struct perf_sample_data data; struct perf_raw_record raw; struct pt_regs regs; @@ -602,6 +602,10 @@ fail: return 0; } + if (WARN_ON_ONCE(!event)) + goto fail; + + hwc = &event->hw; msr = hwc->config_base; buf = ibs_data.regs; rdmsrl(msr, *buf); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 707b2a96e516..86f0c15dcc2d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2997,6 +2997,9 @@ static int intel_pmu_hw_config(struct perf_event *event) } if (x86_pmu.pebs_aliases) x86_pmu.pebs_aliases(event); + + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) + event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; } if (needs_branch_stack(event)) { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8cf03f101938..8dbba77e0518 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1186,16 +1186,20 @@ static void setup_pebs_sample_data(struct perf_event *event, } /* + * We must however always use iregs for the unwinder to stay sane; the + * record BP,SP,IP can point into thin air when the record is from a + * previous PMI context or an (I)RET happend between the record and + * PMI. + */ + if (sample_type & PERF_SAMPLE_CALLCHAIN) + data->callchain = perf_callchain(event, iregs); + + /* * We use the interrupt regs as a base because the PEBS record does not * contain a full regs set, specifically it seems to lack segment * descriptors, which get used by things like user_mode(). * * In the simple case fix up only the IP for PERF_SAMPLE_IP. - * - * We must however always use BP,SP from iregs for the unwinder to stay - * sane; the record BP,SP can point into thin air when the record is - * from a previous PMI context or an (I)RET happend between the record - * and PMI. */ *regs = *iregs; @@ -1214,15 +1218,8 @@ static void setup_pebs_sample_data(struct perf_event *event, regs->si = pebs->si; regs->di = pebs->di; - /* - * Per the above; only set BP,SP if we don't need callchains. - * - * XXX: does this make sense? 
- */ - if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { - regs->bp = pebs->bp; - regs->sp = pebs->sp; - } + regs->bp = pebs->bp; + regs->sp = pebs->sp; #ifndef CONFIG_X86_32 regs->r8 = pebs->r8; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index c9e1e0bef3c3..e17ab885b1e9 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -28,7 +28,7 @@ #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) #define UNCORE_EXTRA_PCI_DEV 0xff -#define UNCORE_EXTRA_PCI_DEV_MAX 3 +#define UNCORE_EXTRA_PCI_DEV_MAX 4 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 87dc0263a2e1..51d7c117e3c7 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void) enum { SNBEP_PCI_QPI_PORT0_FILTER, SNBEP_PCI_QPI_PORT1_FILTER, + BDX_PCI_QPI_PORT2_FILTER, HSWEP_PCI_PCU_3, }; @@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { }, { /* QPI Port 0 filter */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT0_FILTER), }, { /* QPI Port 1 filter */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + SNBEP_PCI_QPI_PORT1_FILTER), }, { /* QPI Port 2 filter */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), - .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + BDX_PCI_QPI_PORT2_FILTER), }, { /* PCU.3 (for Capability registers) */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h index 9ef5ee03d2d7..159622ee0674 100644 --- a/arch/x86/include/asm/qspinlock_paravirt.h +++ b/arch/x86/include/asm/qspinlock_paravirt.h @@ -43,7 +43,7 @@ asm (".pushsection .text;" "push %rdx;" "mov $0x1,%eax;" "xor %edx,%edx;" - "lock cmpxchg %dl,(%rdi);" + LOCK_PREFIX "cmpxchg %dl,(%rdi);" "cmp $0x1,%al;" "jne .slowpath;" "pop %rdx;" diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 2aabd4cb0e3f..adbda5847b14 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void) case 0x04: return 0x02000014; } + if (boot_cpu_data.x86_stepping > 4) + return 0; + return ~0U; } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8c50754c09c1..4b767284b7f5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -123,8 +123,8 @@ void mce_setup(struct mce *m) { memset(m, 0, sizeof(struct mce)); m->cpu = m->extcpu = smp_processor_id(); - /* We hope get_seconds stays lockless */ - m->time = get_seconds(); + /* need the internal __ version to avoid deadlocks */ + m->time = __ktime_get_real_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuid = cpuid_eax(1); m->socketid = cpu_data(m->extcpu).phys_proc_id; @@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn) } #endif + +/* + * Cases where we avoid rendezvous handler timeout: + * 1) If this CPU is offline. + * + * 2) If crashing_cpu was set, e.g. 
we're entering kdump and we need to + * skip those CPUs which remain looping in the 1st kernel - see + * crash_nmi_callback(). + * + * Note: there still is a small window between kexec-ing and the new, + * kdump kernel establishing a new #MC handler where a broadcasted MCE + * might not get handled properly. + */ +static bool __mc_check_crashing_cpu(int cpu) +{ + if (cpu_is_offline(cpu) || + (crashing_cpu != -1 && crashing_cpu != cpu)) { + u64 mcgstatus; + + mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); + if (mcgstatus & MCG_STATUS_RIPV) { + mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); + return true; + } + } + return false; +} + +static void __mc_scan_banks(struct mce *m, struct mce *final, + unsigned long *toclear, unsigned long *valid_banks, + int no_way_out, int *worst) +{ + struct mca_config *cfg = &mca_cfg; + int severity, i; + + for (i = 0; i < cfg->banks; i++) { + __clear_bit(i, toclear); + if (!test_bit(i, valid_banks)) + continue; + + if (!mce_banks[i].ctl) + continue; + + m->misc = 0; + m->addr = 0; + m->bank = i; + + m->status = mce_rdmsrl(msr_ops.status(i)); + if (!(m->status & MCI_STATUS_VAL)) + continue; + + /* + * Corrected or non-signaled errors are handled by + * machine_check_poll(). Leave them alone, unless this panics. + */ + if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && + !no_way_out) + continue; + + /* Set taint even when machine check was not enabled. */ + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); + + severity = mce_severity(m, cfg->tolerant, NULL, true); + + /* + * When machine check was for corrected/deferred handler don't + * touch, unless we're panicking. + */ + if ((severity == MCE_KEEP_SEVERITY || + severity == MCE_UCNA_SEVERITY) && !no_way_out) + continue; + + __set_bit(i, toclear); + + /* Machine check event was not enabled. Clear, but ignore. */ + if (severity == MCE_NO_SEVERITY) + continue; + + mce_read_aux(m, i); + + /* assuming valid severity level != 0 */ + m->severity = severity; + + mce_log(m); + + if (severity > *worst) { + *final = *m; + *worst = severity; + } + } + + /* mce_clear_state will clear *final, save locally for use later */ + *m = *final; +} + /* * The actual machine check handler. This only handles real * exceptions when something got corrupted coming in through int 18. @@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn) */ void do_machine_check(struct pt_regs *regs, long error_code) { + DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); + DECLARE_BITMAP(toclear, MAX_NR_BANKS); struct mca_config *cfg = &mca_cfg; + int cpu = smp_processor_id(); + char *msg = "Unknown"; struct mce m, *final; - int i; int worst = 0; - int severity; /* * Establish sequential order between the CPUs entering the machine * check handler. */ int order = -1; + /* * If no_way_out gets set, there is no safe way to recover from this * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. */ int no_way_out = 0; + /* * If kill_it gets set, there might be a way to recover from this * error. */ int kill_it = 0; - DECLARE_BITMAP(toclear, MAX_NR_BANKS); - DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); - char *msg = "Unknown"; /* * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES * on Intel. */ int lmce = 1; - int cpu = smp_processor_id(); - /* - * Cases where we avoid rendezvous handler timeout: - * 1) If this CPU is offline. - * - * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to - * skip those CPUs which remain looping in the 1st kernel - see - * crash_nmi_callback(). 
- * - * Note: there still is a small window between kexec-ing and the new, - * kdump kernel establishing a new #MC handler where a broadcasted MCE - * might not get handled properly. - */ - if (cpu_is_offline(cpu) || - (crashing_cpu != -1 && crashing_cpu != cpu)) { - u64 mcgstatus; - - mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); - if (mcgstatus & MCG_STATUS_RIPV) { - mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); - return; - } - } + if (__mc_check_crashing_cpu(cpu)) + return; ist_enter(regs); this_cpu_inc(mce_exception_count); - if (!cfg->banks) - goto out; - mce_gather_info(&m, regs); m.tsc = rdtsc(); @@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) order = mce_start(&no_way_out); } - for (i = 0; i < cfg->banks; i++) { - __clear_bit(i, toclear); - if (!test_bit(i, valid_banks)) - continue; - if (!mce_banks[i].ctl) - continue; - - m.misc = 0; - m.addr = 0; - m.bank = i; - - m.status = mce_rdmsrl(msr_ops.status(i)); - if ((m.status & MCI_STATUS_VAL) == 0) - continue; - - /* - * Non uncorrected or non signaled errors are handled by - * machine_check_poll. Leave them alone, unless this panics. - */ - if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && - !no_way_out) - continue; - - /* - * Set taint even when machine check was not enabled. - */ - add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); - - severity = mce_severity(&m, cfg->tolerant, NULL, true); - - /* - * When machine check was for corrected/deferred handler don't - * touch, unless we're panicing. - */ - if ((severity == MCE_KEEP_SEVERITY || - severity == MCE_UCNA_SEVERITY) && !no_way_out) - continue; - __set_bit(i, toclear); - if (severity == MCE_NO_SEVERITY) { - /* - * Machine check event was not enabled. Clear, but - * ignore. - */ - continue; - } - - mce_read_aux(&m, i); - - /* assuming valid severity level != 0 */ - m.severity = severity; - - mce_log(&m); - - if (severity > worst) { - *final = m; - worst = severity; - } - } - - /* mce_clear_state will clear *final, save locally for use later */ - m = *final; + __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst); if (!no_way_out) mce_clear_state(toclear); @@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) if (worst > 0) mce_report_event(regs); mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); -out: + sync_core(); if (worst != MCE_AR_SEVERITY && !kill_it) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d594690d8b95..6b8f11521c41 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, if (cache->nobjs >= min) return 0; while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - page = (void *)__get_free_page(GFP_KERNEL); + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); if (!page) return -ENOMEM; cache->objects[cache->nobjs++] = page; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e30da9a2430c..5d8e317c2b04 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7893,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmxon = true; return 0; @@ -8480,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { - unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); - u32 vmx_instruction_info = 
vmcs_read32(VMX_INSTRUCTION_INFO); - gva_t vmcs_gva; + unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); + u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); + gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; struct x86_exception e; + gva_t gva; if (!nested_vmx_check_permission(vcpu)) return 1; - if (get_vmx_mem_address(vcpu, exit_qualification, - vmx_instruction_info, true, &vmcs_gva)) + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ - if (kvm_write_guest_virt_system(vcpu, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, + sizeof(gpa_t), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -10370,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto free_vmcs; } - if (nested) { + if (nested) nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, kvm_vcpu_apicv_active(&vmx->vcpu)); - vmx->nested.vpid02 = allocate_vpid(); - } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -10391,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return &vmx->vcpu; free_vmcs: - free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 55799873ebe5..8f6cc71e0848 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth) /* sub esp,STACK_SIZE */ EMIT2_off32(0x81, 0xEC, STACK_SIZE); - /* sub ebp,SCRATCH_SIZE+4+12*/ - EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16); + /* sub ebp,SCRATCH_SIZE+12*/ + EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12); /* xor ebx,ebx */ EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX)); @@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) /* mov edx,dword ptr [ebp+off]*/ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1])); - /* add ebp,SCRATCH_SIZE+4+12*/ - EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16); + /* add ebp,SCRATCH_SIZE+12*/ + EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12); /* mov ebx,dword ptr [ebp-12]*/ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 77873ce700ae..ee5d08f25ce4 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va) if (!(md->attribute & EFI_MEMORY_WB)) flags |= _PAGE_PCD; - if (sev_active()) + if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO) flags |= _PAGE_ENC; pfn = md->phys_addr >> PAGE_SHIFT; @@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm) #ifdef CONFIG_EFI_MIXED extern efi_status_t efi64_thunk(u32, ...); +static DEFINE_SPINLOCK(efi_runtime_lock); + #define runtime_service32(func) \ ({ \ u32 table = (u32)(unsigned long)efi.systab; \ @@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...); #define efi_thunk(f, ...) 
\ ({ \ efi_status_t __s; \ - unsigned long __flags; \ u32 __func; \ \ - local_irq_save(__flags); \ arch_efi_call_virt_setup(); \ \ __func = runtime_service32(f); \ __s = efi64_thunk(__func, __VA_ARGS__); \ \ arch_efi_call_virt_teardown(); \ - local_irq_restore(__flags); \ \ __s; \ }) @@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc) { efi_status_t status; u32 phys_tm, phys_tc; + unsigned long flags; spin_lock(&rtc_lock); + spin_lock_irqsave(&efi_runtime_lock, flags); phys_tm = virt_to_phys_or_null(tm); phys_tc = virt_to_phys_or_null(tc); status = efi_thunk(get_time, phys_tm, phys_tc); + spin_unlock_irqrestore(&efi_runtime_lock, flags); spin_unlock(&rtc_lock); return status; @@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm) { efi_status_t status; u32 phys_tm; + unsigned long flags; spin_lock(&rtc_lock); + spin_lock_irqsave(&efi_runtime_lock, flags); phys_tm = virt_to_phys_or_null(tm); status = efi_thunk(set_time, phys_tm); + spin_unlock_irqrestore(&efi_runtime_lock, flags); spin_unlock(&rtc_lock); return status; @@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, { efi_status_t status; u32 phys_enabled, phys_pending, phys_tm; + unsigned long flags; spin_lock(&rtc_lock); + spin_lock_irqsave(&efi_runtime_lock, flags); phys_enabled = virt_to_phys_or_null(enabled); phys_pending = virt_to_phys_or_null(pending); @@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, status = efi_thunk(get_wakeup_time, phys_enabled, phys_pending, phys_tm); + spin_unlock_irqrestore(&efi_runtime_lock, flags); spin_unlock(&rtc_lock); return status; @@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) { efi_status_t status; u32 phys_tm; + unsigned long flags; spin_lock(&rtc_lock); + spin_lock_irqsave(&efi_runtime_lock, flags); phys_tm = virt_to_phys_or_null(tm); status = efi_thunk(set_wakeup_time, enabled, phys_tm); + spin_unlock_irqrestore(&efi_runtime_lock, flags); spin_unlock(&rtc_lock); return status; @@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, efi_status_t status; u32 phys_name, phys_vendor, phys_attr; u32 phys_data_size, phys_data; + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); phys_data_size = virt_to_phys_or_null(data_size); phys_vendor = virt_to_phys_or_null(vendor); @@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, status = efi_thunk(get_variable, phys_name, phys_vendor, phys_attr, phys_data_size, phys_data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } @@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, { u32 phys_name, phys_vendor, phys_data; efi_status_t status; + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); + + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); + phys_vendor = virt_to_phys_or_null(vendor); + phys_data = virt_to_phys_or_null_size(data, data_size); + + /* If data_size is > sizeof(u32) we've got problems */ + status = efi_thunk(set_variable, phys_name, phys_vendor, + attr, data_size, phys_data); + + spin_unlock_irqrestore(&efi_runtime_lock, flags); + + return status; +} + +static efi_status_t +efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data) +{ + u32 phys_name, phys_vendor, phys_data; + efi_status_t status; + unsigned long flags; + + if 
(!spin_trylock_irqsave(&efi_runtime_lock, flags)) + return EFI_NOT_READY; phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); phys_vendor = virt_to_phys_or_null(vendor); @@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, status = efi_thunk(set_variable, phys_name, phys_vendor, attr, data_size, phys_data); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } @@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size, { efi_status_t status; u32 phys_name_size, phys_name, phys_vendor; + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); phys_name_size = virt_to_phys_or_null(name_size); phys_vendor = virt_to_phys_or_null(vendor); @@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size, status = efi_thunk(get_next_variable, phys_name_size, phys_name, phys_vendor); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } @@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count) { efi_status_t status; u32 phys_count; + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); phys_count = virt_to_phys_or_null(count); status = efi_thunk(get_next_high_mono_count, phys_count); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } @@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data) { u32 phys_data; + unsigned long flags; + + spin_lock_irqsave(&efi_runtime_lock, flags); phys_data = virt_to_phys_or_null_size(data, data_size); efi_thunk(reset_system, reset_type, status, data_size, phys_data); + + spin_unlock_irqrestore(&efi_runtime_lock, flags); } static efi_status_t @@ -872,10 +933,40 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space, { efi_status_t status; u32 phys_storage, phys_remaining, phys_max; + unsigned long flags; + + if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) + return EFI_UNSUPPORTED; + + spin_lock_irqsave(&efi_runtime_lock, flags); + + phys_storage = virt_to_phys_or_null(storage_space); + phys_remaining = virt_to_phys_or_null(remaining_space); + phys_max = virt_to_phys_or_null(max_variable_size); + + status = efi_thunk(query_variable_info, attr, phys_storage, + phys_remaining, phys_max); + + spin_unlock_irqrestore(&efi_runtime_lock, flags); + + return status; +} + +static efi_status_t +efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space, + u64 *remaining_space, + u64 *max_variable_size) +{ + efi_status_t status; + u32 phys_storage, phys_remaining, phys_max; + unsigned long flags; if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) return EFI_UNSUPPORTED; + if (!spin_trylock_irqsave(&efi_runtime_lock, flags)) + return EFI_NOT_READY; + phys_storage = virt_to_phys_or_null(storage_space); phys_remaining = virt_to_phys_or_null(remaining_space); phys_max = virt_to_phys_or_null(max_variable_size); @@ -883,6 +974,8 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space, status = efi_thunk(query_variable_info, attr, phys_storage, phys_remaining, phys_max); + spin_unlock_irqrestore(&efi_runtime_lock, flags); + return status; } @@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void) efi.get_variable = efi_thunk_get_variable; efi.get_next_variable = efi_thunk_get_next_variable; efi.set_variable = efi_thunk_set_variable; + efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking; efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count; efi.reset_system = efi_thunk_reset_system; 
efi.query_variable_info = efi_thunk_query_variable_info; + efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking; efi.update_capsule = efi_thunk_update_capsule; efi.query_capsule_caps = efi_thunk_query_capsule_caps; } diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 36c1f8b9f7e0..844d31cb8a0c 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia); */ void efi_delete_dummy_variable(void) { - efi.set_variable((efi_char16_t *)efi_dummy_name, - &EFI_DUMMY_GUID, - EFI_VARIABLE_NON_VOLATILE | - EFI_VARIABLE_BOOTSERVICE_ACCESS | - EFI_VARIABLE_RUNTIME_ACCESS, - 0, NULL); + efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name, + &EFI_DUMMY_GUID, + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL); } /* @@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) int num_entries; void *new; - if (efi_mem_desc_lookup(addr, &md)) { + if (efi_mem_desc_lookup(addr, &md) || + md.type != EFI_BOOT_SERVICES_DATA) { pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr); return; } diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c index 744afdc18cf3..56c44d865f7b 100644 --- a/arch/x86/um/mem_32.c +++ b/arch/x86/um/mem_32.c @@ -16,7 +16,7 @@ static int __init gate_vma_init(void) if (!FIXADDR_USER_START) return 0; - gate_vma.vm_mm = NULL; + vma_init(&gate_vma, NULL); gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; |