author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-02 09:54:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-02 09:54:56 +0300
commit     d3b5d35290d729a2518af00feca867385a1b08fa
tree       7b56c0863d59bc57f7c7dcf5d5665c56b05f1d1b /drivers
parent     aa2a4b6569d5b10491b606a86e574dff3852597a
parent     71389703839ebe9cb426c72d5f0bd549592e583c
download   linux-d3b5d35290d729a2518af00feca867385a1b08fa.tar.xz
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
"The main x86 MM changes in this cycle were:
- continued native kernel PCID support preparation patches to the TLB
flushing code (Andy Lutomirski)
- various fixes for 32-bit compat syscalls returning addresses above
  4 GB to applications launched from 64-bit binaries, motivated by
  C/R frameworks such as Virtuozzo (Dmitry Safonov)
- continued Intel 5-level paging enablement: in particular the
conversion of x86 GUP to the generic GUP code. (Kirill A. Shutemov)
- x86/mpx ABI corner case fixes/enhancements (Joerg Roedel)
- ... plus misc updates, fixes and cleanups"
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (62 commits)
mm, zone_device: Replace {get, put}_zone_device_page() with a single reference to fix pmem crash
x86/mm: Fix flush_tlb_page() on Xen
x86/mm: Make flush_tlb_mm_range() more predictable
x86/mm: Remove flush_tlb() and flush_tlb_current_task()
x86/vm86/32: Switch to flush_tlb_mm_range() in mark_screen_rdonly()
x86/mm/64: Fix crash in remove_pagetable()
Revert "x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation"
x86/boot/e820: Remove a redundant self assignment
x86/mm: Fix dump pagetables for 4 levels of page tables
x86/mpx, selftests: Only check bounds-vs-shadow when we keep shadow
x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
Revert "x86/mm/numa: Remove numa_nodemask_from_meminfo()"
x86/espfix: Add support for 5-level paging
x86/kasan: Extend KASAN to support 5-level paging
x86/mm: Add basic defines/helpers for CONFIG_X86_5LEVEL=y
x86/paravirt: Add 5-level support to the paravirt code
x86/mm: Define virtual memory map for 5-level paging
x86/asm: Remove __VIRTUAL_MASK_SHIFT==47 assert
x86/boot: Detect 5-level paging support
x86/mm/numa: Remove numa_nodemask_from_meminfo()
...
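Several of the commits above (the virtual memory map definition, the KASAN and espfix extensions, the boot-time detection) belong to the 5-level paging enablement mentioned in the summary. Below is a minimal user-space sketch, not kernel code, of the underlying address layout, assuming the standard x86 LA57 split of a 57-bit virtual address into five 9-bit table indices (PGD/P4D/PUD/PMD/PTE) plus a 12-bit page offset; the helper name and the example address are illustrative only and are not kernel APIs.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12                        /* 4 KiB pages */
#define LEVEL_BITS  9                         /* 512 entries per table */
#define LEVEL_MASK  ((1u << LEVEL_BITS) - 1)

/* level 0 = PTE index, level 4 = PGD index */
static unsigned int table_index(uint64_t vaddr, int level)
{
        return (vaddr >> (PAGE_SHIFT + level * LEVEL_BITS)) & LEVEL_MASK;
}

int main(void)
{
        /* Arbitrary example address inside the 57-bit LA57 range. */
        uint64_t vaddr = 0x00ffabcdef123456ULL;
        static const char *const names[] = { "PTE", "PMD", "PUD", "P4D", "PGD" };

        for (int level = 4; level >= 0; level--)
                printf("%-3s index : %u\n", names[level], table_index(vaddr, level));
        printf("page offset: %llu\n",
               (unsigned long long)(vaddr & ((1ULL << PAGE_SHIFT) - 1)));
        return 0;       /* 5 * 9 + 12 = 57 bits of virtual address */
}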
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dax/pmem.c                2
-rw-r--r--  drivers/lguest/x86/core.c         6
-rw-r--r--  drivers/nvdimm/pmem.c            13
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c  10
4 files changed, 20 insertions, 11 deletions
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 033f49b31fdc..cb0d742fa23f 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -43,6 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
 	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	wait_for_completion(&dax_pmem->cmp);
 	percpu_ref_exit(ref);
 }
 
@@ -53,7 +54,6 @@ static void dax_pmem_percpu_kill(void *data)
 
 	dev_dbg(dax_pmem->dev, "%s\n", __func__);
 	percpu_ref_kill(ref);
-	wait_for_completion(&dax_pmem->cmp);
 }
 
 static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index d71f6323ac00..b4f79b923aea 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -504,7 +504,7 @@ void __init lguest_arch_host_init(void)
	 * byte, not the size, hence the "-1").
	 */
	state->host_gdt_desc.size = GDT_SIZE-1;
-	state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+	state->host_gdt_desc.address = (long)get_cpu_gdt_rw(i);
 
	/*
	 * All CPUs on the Host use the same Interrupt Descriptor
@@ -554,8 +554,8 @@ void __init lguest_arch_host_init(void)
		 * The Host needs to be able to use the LGUEST segments on this
		 * CPU, too, so put them in the Host GDT.
		 */
-		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
-		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+		get_cpu_gdt_rw(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+		get_cpu_gdt_rw(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}
 
	/*
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 5b536be5a12e..fbc640bf06b0 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -25,6 +25,7 @@
 #include <linux/badblocks.h>
 #include <linux/memremap.h>
 #include <linux/vmalloc.h>
+#include <linux/blk-mq.h>
 #include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
@@ -231,6 +232,11 @@ static void pmem_release_queue(void *q)
	blk_cleanup_queue(q);
 }
 
+static void pmem_freeze_queue(void *q)
+{
+	blk_freeze_queue_start(q);
+}
+
 static void pmem_release_disk(void *disk)
 {
	del_gendisk(disk);
@@ -284,6 +290,9 @@ static int pmem_attach_disk(struct device *dev,
	if (!q)
		return -ENOMEM;
 
+	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
+		return -ENOMEM;
+
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
@@ -303,10 +312,10 @@ static int pmem_attach_disk(struct device *dev,
			pmem->size, ARCH_MEMREMAP_PMEM);
 
	/*
-	 * At release time the queue must be dead before
+	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
-	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
+	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;
 
	if (IS_ERR(addr))
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 438d4c72c7b3..ff563db025b3 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -54,7 +54,7 @@ __asm__(".text \n"
 
 #define Q2_SET_SEL(cpu, selname, address, size) \
 do { \
-	struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+	struct desc_struct *gdt = get_cpu_gdt_rw((cpu)); \
	set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \
	set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
 } while(0)
@@ -95,8 +95,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
		return PNP_FUNCTION_NOT_SUPPORTED;
 
	cpu = get_cpu();
-	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
-	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
+	save_desc_40 = get_cpu_gdt_rw(cpu)[0x40 / 8];
+	get_cpu_gdt_rw(cpu)[0x40 / 8] = bad_bios_desc;
 
	/* On some boxes IRQ's during PnP BIOS calls are deadly. */
	spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -134,7 +134,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
			:"memory");
	spin_unlock_irqrestore(&pnp_bios_lock, flags);
 
-	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
+	get_cpu_gdt_rw(cpu)[0x40 / 8] = save_desc_40;
	put_cpu();
 
	/* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -477,7 +477,7 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
	pnp_bios_callpoint.segment = PNP_CS16;
 
	for_each_possible_cpu(i) {
-		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		struct desc_struct *gdt = get_cpu_gdt_rw(i);
		if (!gdt)
			continue;
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
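The nvdimm/pmem hunk above relies on devm release actions running in reverse (LIFO) order at device detach: blk_cleanup_queue() is now registered before devm_memremap_pages(), and the new pmem_freeze_queue() action after it, so on teardown the queue is frozen first, the page mapping is unwound next, and the queue is destroyed last. Below is a small user-space sketch of that LIFO unwinding only; the helper names (add_release_action, unwind_release_actions) are made up for illustration and are not kernel APIs.

#include <stdio.h>

typedef void (*release_fn)(const char *what);

struct release_action {
        release_fn  fn;
        const char *what;
};

static struct release_action actions[8];
static int nr_actions;

/* Rough analogue of devm_add_action_or_reset(): remember a teardown step. */
static void add_release_action(release_fn fn, const char *what)
{
        actions[nr_actions].fn = fn;
        actions[nr_actions].what = what;
        nr_actions++;
}

/* Rough analogue of device detach: run the remembered steps in LIFO order. */
static void unwind_release_actions(void)
{
        while (nr_actions > 0) {
                nr_actions--;
                actions[nr_actions].fn(actions[nr_actions].what);
        }
}

static void release(const char *what)
{
        printf("teardown: %s\n", what);
}

int main(void)
{
        /* Registration order mirrors the patched pmem_attach_disk(). */
        add_release_action(release, "blk_cleanup_queue()      (pmem_release_queue)");
        add_release_action(release, "devm_memremap_pages() unwind");
        add_release_action(release, "blk_freeze_queue_start() (pmem_freeze_queue)");

        unwind_release_actions();
        /*
         * Prints, in LIFO order:
         *   teardown: blk_freeze_queue_start() (pmem_freeze_queue)
         *   teardown: devm_memremap_pages() unwind
         *   teardown: blk_cleanup_queue()      (pmem_release_queue)
         * i.e. the queue is frozen before the page mapping goes away and is
         * only destroyed afterwards, which is the ordering the patch arranges.
         */
        return 0;
}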