Diffstat (limited to 'drivers/firmware/efi/efi.c')
-rw-r--r-- | drivers/firmware/efi/efi.c | 101
1 file changed, 78 insertions, 23 deletions
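The hunks below use EFI_MEMRESERVE_SIZE(), EFI_MEMRESERVE_COUNT() and the count/entry[] members of struct linux_efi_memreserve, all of which come from the matching include/linux/efi.h change rather than from this file. As a reading aid, a rough sketch of that layout (paraphrased from the header change, not part of this diff; field order and macro spelling may differ from upstream):

/*
 * Sketch of the persistent memreserve node assumed by the code below
 * (hypothetical reconstruction; see include/linux/efi.h for the real one).
 * One node occupies at most a page: 'size' is the number of entry[] slots
 * it can hold, 'count' how many are in use, and 'next' is the physical
 * address of the next node in the list (0 terminates the list).
 */
struct linux_efi_memreserve {
	int		size;		/* allocated slots in entry[] */
	atomic_t	count;		/* slots currently in use */
	phys_addr_t	next;		/* pa of the next node, or 0 */
	struct {
		phys_addr_t	base;
		phys_addr_t	size;
	} entry[0];
};

/* bytes needed for a node with 'count' slots */
#define EFI_MEMRESERVE_SIZE(count)	(sizeof(struct linux_efi_memreserve) + \
	(count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))

/* slots that fit in an allocation of 'size' bytes, e.g. PAGE_SIZE */
#define EFI_MEMRESERVE_COUNT(size)	(((size) - sizeof(struct linux_efi_memreserve)) / \
	sizeof(((struct linux_efi_memreserve *)0)->entry[0]))

This is why the first hunk can reserve EFI_MEMRESERVE_SIZE(rsv->size) bytes for the node itself and then walk entry[0..count) to reserve the recorded regions, and why efi_mem_reserve_persistent() can claim a free slot with atomic_fetch_add_unless(&rsv->count, 1, rsv->size) before falling back to chaining a fresh page onto ->next.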
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 249eb70691b0..4c46ff6f2242 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,27 +592,43 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 		early_memunmap(tbl, sizeof(*tbl));
 	}
+	return 0;
+}

+int __init efi_apply_persistent_mem_reservations(void)
+{
 	if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
 		unsigned long prsv = efi.mem_reserve;

 		while (prsv) {
 			struct linux_efi_memreserve *rsv;
-
-			/* reserve the entry itself */
-			memblock_reserve(prsv, sizeof(*rsv));
-
-			rsv = early_memremap(prsv, sizeof(*rsv));
-			if (rsv == NULL) {
+			u8 *p;
+			int i;
+
+			/*
+			 * Just map a full page: that is what we will get
+			 * anyway, and it permits us to map the entire entry
+			 * before knowing its size.
+			 */
+			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+					   PAGE_SIZE);
+			if (p == NULL) {
 				pr_err("Could not map UEFI memreserve entry!\n");
 				return -ENOMEM;
 			}

-			if (rsv->size)
-				memblock_reserve(rsv->base, rsv->size);
+			rsv = (void *)(p + prsv % PAGE_SIZE);
+
+			/* reserve the entry itself */
+			memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+
+			for (i = 0; i < atomic_read(&rsv->count); i++) {
+				memblock_reserve(rsv->entry[i].base,
+						 rsv->entry[i].size);
+			}

 			prsv = rsv->next;
-			early_memunmap(rsv, sizeof(*rsv));
+			early_memunmap(p, PAGE_SIZE);
 		}
 	}

@@ -963,36 +979,75 @@ bool efi_is_table_address(unsigned long phys_addr)
 }

 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

-int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+static int __init efi_memreserve_map_root(void)
 {
-	struct linux_efi_memreserve *rsv, *parent;
-
 	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
 		return -ENODEV;

-	rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
-	if (!rsv)
+	efi_memreserve_root = memremap(efi.mem_reserve,
+				       sizeof(*efi_memreserve_root),
+				       MEMREMAP_WB);
+	if (WARN_ON_ONCE(!efi_memreserve_root))
 		return -ENOMEM;
+	return 0;
+}

-	parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
-	if (!parent) {
-		kfree(rsv);
-		return -ENOMEM;
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+	struct linux_efi_memreserve *rsv;
+	unsigned long prsv;
+	int rc, index;
+
+	if (efi_memreserve_root == (void *)ULONG_MAX)
+		return -ENODEV;
+
+	if (!efi_memreserve_root) {
+		rc = efi_memreserve_map_root();
+		if (rc)
+			return rc;
 	}

-	rsv->base = addr;
-	rsv->size = size;
+	/* first try to find a slot in an existing linked list entry */
+	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+		rsv = __va(prsv);
+		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+		if (index < rsv->size) {
+			rsv->entry[index].base = addr;
+			rsv->entry[index].size = size;
+
+			return 0;
+		}
+	}
+
+	/* no slot found - allocate a new linked list entry */
+	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+	if (!rsv)
+		return -ENOMEM;
+
+	rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+	atomic_set(&rsv->count, 1);
+	rsv->entry[0].base = addr;
+	rsv->entry[0].size = size;

 	spin_lock(&efi_mem_reserve_persistent_lock);
-	rsv->next = parent->next;
-	parent->next = __pa(rsv);
+	rsv->next = efi_memreserve_root->next;
+	efi_memreserve_root->next = __pa(rsv);
 	spin_unlock(&efi_mem_reserve_persistent_lock);

-	memunmap(parent);
+	return 0;
+}

+static int __init efi_memreserve_root_init(void)
+{
+	if (efi_memreserve_root)
+		return 0;
+	if (efi_memreserve_map_root())
+		efi_memreserve_root = (void *)ULONG_MAX;
 	return 0;
 }
+early_initcall(efi_memreserve_root_init);

 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,