From 40f7828b36e3b9dd2fdc34d065e342cddf8d7bef Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 14 Dec 2016 15:04:01 -0800 Subject: btrfs: better handle btrfs_printk() defaults Commit 262c5e86fec7 ("printk/btrfs: handle more message headers") triggers: warning: `ratelimit' may be used uninitialized in this function with gcc (4.1.2) and probably many other versions. The code is actually correct, but a bit twisted. Let's make it more straightforward and set the default values at the beginning. Link: http://lkml.kernel.org/r/20161213135246.GQ3506@pathway.suse.cz Signed-off-by: Petr Mladek Reported-by: Geert Uytterhoeven Reviewed-by: Geert Uytterhoeven Acked-by: David Sterba Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/super.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 180f910339f4..3b713b6fcc26 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -202,12 +202,12 @@ static struct ratelimit_state printk_limits[] = { void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) { struct super_block *sb = fs_info->sb; - char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1]; + char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; struct va_format vaf; va_list args; - const char *type = NULL; int kern_level; - struct ratelimit_state *ratelimit; + const char *type = logtypes[4]; + struct ratelimit_state *ratelimit = &printk_limits[4]; va_start(args, fmt); @@ -223,12 +223,6 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) fmt += size; } - if (!type) { - *lvl = '\0'; - type = logtypes[4]; - ratelimit = &printk_limits[4]; - } - vaf.fmt = fmt; vaf.va = &args; -- cgit v1.2.3 From 4d1f0fb096aedea7bb5489af93498a82e467c480 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 14 Dec 2016 15:04:04 -0800 Subject: kernel/watchdog: use nmi registers snapshot in hardlockup handler The NMI handler doesn't call set_irq_regs(); that is done only by a normal IRQ. Thus get_irq_regs() returns either NULL or a stale registers snapshot with IP/SP pointing to the code interrupted by the IRQ which was interrupted by the NMI. NULL isn't a problem: in this case the watchdog calls dump_stack() and prints a full stack trace including the NMI. But if we're stuck in an IRQ handler, the NMI watchdog will print a stack trace without the IRQ part at all. This patch uses the registers snapshot passed into the NMI handler as an argument: these registers point exactly to the instruction interrupted by the NMI.
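For context, the perf NMI overflow callback already receives the NMI-time registers as its third parameter, so the fix is simply to stop shadowing that argument. A minimal sketch of the resulting shape (simplified from kernel/watchdog.c, surrounding logic elided):

static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* ... hardlockup detection elided ... */
	if (is_hardlockup()) {
		if (regs)
			show_regs(regs);	/* regs describe the NMI-interrupted instruction */
		else
			dump_stack();
	}
}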
Fixes: 55537871ef66 ("kernel/watchdog.c: perform all-CPU backtrace in case of hard lockup") Link: http://lkml.kernel.org/r/146771764784.86724.6006627197118544150.stgit@buzz Signed-off-by: Konstantin Khlebnikov Cc: Jiri Kosina Cc: Ulrich Obergfell Cc: Aaron Tomlin Cc: [4.4+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 9acb29f280ec..6d1020c03d41 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -344,7 +344,6 @@ static void watchdog_overflow_callback(struct perf_event *event, */ if (is_hardlockup()) { int this_cpu = smp_processor_id(); - struct pt_regs *regs = get_irq_regs(); /* only print hardlockups once */ if (__this_cpu_read(hard_watchdog_warn) == true) -- cgit v1.2.3 From 73e64c51afc56d4863ae225e947ba2f16ad04487 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 14 Dec 2016 15:04:07 -0800 Subject: mm, compaction: allow compaction for GFP_NOFS requests Compaction has been disabled for GFP_NOFS and GFP_NOIO requests since direct compaction was introduced by commit 56de7263fcf3 ("mm: compaction: direct compact when a high-order allocation fails"). The main reason is that the migration of page cache pages might recurse back into the fs/io layer and we could potentially deadlock. This is overly conservative because all the anonymous memory is migratable in the GFP_NOFS context just fine. This might be a large portion of the memory in many/most workloads. Remove the GFP_NOFS restriction and make sure that we skip all fs pages (those with a mapping) while isolating pages to be migrated. We cannot consider clean fs pages because they might need a metadata update, so only isolate pages without any mapping for nofs requests. The effect of this patch will probably be very limited in many/most workloads because higher order GFP_NOFS requests are quite rare, although different configurations might lead to very different results. Dave Chinner has mentioned a heavy metadata workload with a 64kB block size which, to quote him:
: Unfortunately, there was an era of cargo cult configuration tweaks in the
: Ceph community that has resulted in a large number of production machines
: with XFS filesystems configured this way. And a lot of them store large
: numbers of small files and run under significant sustained memory
: pressure.
:
: I'm slowly working towards getting rid of these high order allocations and
: replacing them with the equivalent number of single page allocations, but
: I haven't got that (complex) change working yet.
We can do the following to simulate that workload:
$ mkfs.xfs -f -n size=64k
$ mount /mnt/scratch
$ time ./fs_mark -D 10000 -S0 -n 100000 -s 0 -L 32 \
	-d /mnt/scratch/0 -d /mnt/scratch/1 \
	-d /mnt/scratch/2 -d /mnt/scratch/3 \
	-d /mnt/scratch/4 -d /mnt/scratch/5 \
	-d /mnt/scratch/6 -d /mnt/scratch/7 \
	-d /mnt/scratch/8 -d /mnt/scratch/9 \
	-d /mnt/scratch/10 -d /mnt/scratch/11 \
	-d /mnt/scratch/12 -d /mnt/scratch/13 \
	-d /mnt/scratch/14 -d /mnt/scratch/15
and indeed it hammers the system with many high order GFP_NOFS requests, as per a simple tracepoint during the load:
$ echo '!(gfp_flags & 0x80) && (gfp_flags & 0x400000)' > $TRACE_MNT/events/kmem/mm_page_alloc/filter
I am getting
5287609 order=0
     37 order=1
1594905 order=2
3048439 order=3
6699207 order=4
  66645 order=5
My testing was done in a kvm guest, so performance numbers should be taken with a grain of salt, but there seems to be a difference when the patch is applied:
* Original kernel
FSUse%        Count         Size    Files/sec     App Overhead
     1      1600000            0       4300.1         20745838
     3      3200000            0       4239.9         23849857
     5      4800000            0       4243.4         25939543
     6      6400000            0       4248.4         19514050
     8      8000000            0       4262.1         20796169
     9      9600000            0       4257.6         21288675
    11     11200000            0       4259.7         19375120
    13     12800000            0       4220.7         22734141
    14     14400000            0       4238.5         31936458
    16     16000000            0       4231.5         23409901
    18     17600000            0       4045.3         23577700
    19     19200000            0       2783.4         58299526
    21     20800000            0       2678.2         40616302
    23     22400000            0       2693.5         83973996
and XFS complaining about memory allocation not making progress
[ 2304.372647] XFS: fs_mark(3289) possible memory allocation deadlock size 65624 in kmem_alloc (mode:0x2408240)
[ 2304.443323] XFS: fs_mark(3285) possible memory allocation deadlock size 65728 in kmem_alloc (mode:0x2408240)
[ 4796.772477] XFS: fs_mark(3424) possible memory allocation deadlock size 46936 in kmem_alloc (mode:0x2408240)
[ 4796.775329] XFS: fs_mark(3423) possible memory allocation deadlock size 51416 in kmem_alloc (mode:0x2408240)
[ 4797.388808] XFS: fs_mark(3424) possible memory allocation deadlock size 65728 in kmem_alloc (mode:0x2408240)
* Patched kernel
FSUse%        Count         Size    Files/sec     App Overhead
     1      1600000            0       4289.1         19243934
     3      3200000            0       4241.6         32828865
     5      4800000            0       4248.7         32884693
     6      6400000            0       4314.4         19608921
     8      8000000            0       4269.9         24953292
     9      9600000            0       4270.7         33235572
    11     11200000            0       4346.4         40817101
    13     12800000            0       4285.3         29972397
    14     14400000            0       4297.2         20539765
    16     16000000            0       4219.6         18596767
    18     17600000            0       4273.8         49611187
    19     19200000            0       4300.4         27944451
    21     20800000            0       4270.6         22324585
    22     22400000            0       4317.6         22650382
    24     24000000            0       4065.2         22297964
So the drop-off at Count 19200000 didn't happen, and there was only a single warning about an allocation not making progress
[ 3063.815003] XFS: fs_mark(3272) possible memory allocation deadlock size 65624 in kmem_alloc (mode:0x2408240)
This suggests that the patch has helped even though there is not all that much anonymous memory, as the workload mostly generates fs metadata. I assume the success rate would be higher with more anonymous memory, which should be the case in many workloads.
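As a concrete (hypothetical) illustration of the callers this helps: the tracepoint filter above selects allocations with ___GFP_DIRECT_RECLAIM (0x400000) set and ___GFP_FS (0x80) clear in this kernel's gfp flag layout, i.e. requests of the kind sketched here, which XFS's kmem_alloc() issues for 64kB directory blocks:

/* A high-order GFP_NOFS request: 65536 bytes = 16 pages = order 4 on 4kB
 * pages. Before this patch, the missing __GFP_FS bit made
 * try_to_compact_pages() bail out immediately with COMPACT_SKIPPED. */
void *buf = kmalloc(65536, GFP_NOFS);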
[akpm@linux-foundation.org: fix comment] Link: http://lkml.kernel.org/r/20161012114721.31853-1-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Joonsoo Kim Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 223464227299..949198d01260 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -818,6 +818,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, page_count(page) > page_mapcount(page)) goto isolate_fail; + /* + * Only allow to migrate anonymous pages in GFP_NOFS context + * because those do not depend on fs locks. + */ + if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) + goto isolate_fail; + /* If we already hold the lock, we can skip some rechecking */ if (!locked) { locked = compact_trylock_irqsave(zone_lru_lock(zone), @@ -1677,14 +1684,16 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio) { - int may_enter_fs = gfp_mask & __GFP_FS; int may_perform_io = gfp_mask & __GFP_IO; struct zoneref *z; struct zone *zone; enum compact_result rc = COMPACT_SKIPPED; - /* Check if the GFP flags allow compaction */ - if (!may_enter_fs || !may_perform_io) + /* + * Check if the GFP flags allow compaction - GFP_NOIO is really + * tricky context because the migration might require IO + */ + if (!may_perform_io) return COMPACT_SKIPPED; trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); @@ -1751,6 +1760,7 @@ static void compact_node(int nid) .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .whole_zone = true, + .gfp_mask = GFP_KERNEL, }; @@ -1876,6 +1886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) .classzone_idx = pgdat->kcompactd_classzone_idx, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = true, + .gfp_mask = GFP_KERNEL, }; trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, -- cgit v1.2.3 From c7be96af89d4b53211862d8599b2430e8900ed92 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 14 Dec 2016 15:04:10 -0800 Subject: signals: avoid unnecessary taking of sighand->siglock When running a certain database workload on a high-end system with many CPUs, it was found that spinlock contention in the sigprocmask syscall became a significant portion of the overall CPU cycles, as shown below.
  9.30%  9.30%  905387  dataserver  /proc/kcore  0x7fff8163f4d2  [k] _raw_spin_lock_irq
            |
            ---_raw_spin_lock_irq
               |
               |--99.34%-- __set_current_blocked
               |          sigprocmask
               |          sys_rt_sigprocmask
               |          system_call_fastpath
               |          |
               |          |--50.63%-- __swapcontext
               |          |          |
               |          |          |--99.91%-- upsleepgeneric
               |          |
               |          |--49.36%-- __setcontext
               |                     ktskRun
Looking further into the swapcontext function in glibc, it was found that the function always calls sigprocmask() without checking whether there are changes in the signal mask. A check was added to the __set_current_blocked() function to avoid taking the sighand->siglock spinlock if there is no change in the signal mask. This will prevent unneeded spinlock contention when many threads are trying to call sigprocmask(). With this patch applied, the spinlock contention in sigprocmask() was gone.
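A minimal userspace sketch of the pattern this optimizes (hypothetical, but mirroring what glibc's swapcontext does on every switch): repeatedly installing a signal mask identical to the current one.

#include <signal.h>
#include <stddef.h>

int main(void)
{
	sigset_t set;

	/* Fetch the current mask once... */
	sigprocmask(SIG_SETMASK, NULL, &set);

	/* ...then reinstall it unchanged, over and over. With this patch the
	 * kernel notices the mask is identical and returns without taking
	 * sighand->siglock, so many threads doing this no longer contend. */
	for (int i = 0; i < 1000000; i++)
		sigprocmask(SIG_SETMASK, &set, NULL);

	return 0;
}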
Link: http://lkml.kernel.org/r/1474979209-11867-1-git-send-email-Waiman.Long@hpe.com Signed-off-by: Waiman Long Acked-by: Oleg Nesterov Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Stas Sergeev Cc: Scott J Norton Cc: Douglas Hatch Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/signal.h | 17 +++++++++++++++++ kernel/signal.c | 7 +++++++ 2 files changed, 24 insertions(+) diff --git a/include/linux/signal.h b/include/linux/signal.h index b63f63eaa39c..5308304993be 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set) } } +static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) +{ + switch (_NSIG_WORDS) { + case 4: + return (set1->sig[3] == set2->sig[3]) && + (set1->sig[2] == set2->sig[2]) && + (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 2: + return (set1->sig[1] == set2->sig[1]) && + (set1->sig[0] == set2->sig[0]); + case 1: + return set1->sig[0] == set2->sig[0]; + } + return 0; +} + #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS diff --git a/kernel/signal.c b/kernel/signal.c index 29a410780aa9..ae60996fedff 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2491,6 +2491,13 @@ void __set_current_blocked(const sigset_t *newset) { struct task_struct *tsk = current; + /* + * In case the signal mask hasn't changed, there is nothing we need + * to do. The current->blocked shouldn't be modified by other task. + */ + if (sigequalsets(&tsk->blocked, newset)) + return; + spin_lock_irq(&tsk->sighand->siglock); __set_task_blocked(tsk, newset); spin_unlock_irq(&tsk->sighand->siglock); -- cgit v1.2.3 From 760c6a9139c37e16502362b22656d0cc4e840e8f Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 14 Dec 2016 15:04:14 -0800 Subject: coredump: clarify "unsafe core_pattern" warning I was amused to find the "unsafe core_pattern" warning despite having these lines in /etc/sysctl.conf:
fs.suid_dumpable=2
kernel.core_pattern=/core/core-%e-%p-%E
kernel.core_uses_pid=0
Turns out the kernel is formally right. The default core_pattern is just "core", which doesn't qualify as a secure path while setting fs.suid_dumpable. Hint admins about the solution, clarify the sysctl names, delete unnecessary '\' characters (string literals are concatenated regardless), and reformat for easier grepping. Link: http://lkml.kernel.org/r/20161029152124.GA1258@avx2 Signed-off-by: Alexey Dobriyan Acked-by: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 39b3368f6de6..1475d2545b7e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2389,9 +2389,11 @@ static void validate_coredump_safety(void) #ifdef CONFIG_COREDUMP if (suid_dumpable == SUID_DUMP_ROOT && core_pattern[0] != '/' && core_pattern[0] != '|') { - printk(KERN_WARNING "Unsafe core_pattern used with "\ "suid_dumpable=2. Pipe handler or fully qualified "\ "core dump path required.\n"); + printk(KERN_WARNING +"Unsafe core_pattern used with fs.suid_dumpable=2.\n" +"Pipe handler or fully qualified core dump path required.\n" +"Set kernel.core_pattern before fs.suid_dumpable.\n" + ); } #endif } -- cgit v1.2.3 From 69f58384791ac6da4165ce8e6defd6f408f4afdf Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Dec 2016 15:04:16 -0800 Subject: Revert "kdump, vmcoreinfo: report memory sections virtual addresses" This reverts commit 0549a3c02efb ("kdump, vmcoreinfo: report memory sections virtual addresses"). Commit 0549a3c02efb tells the userspace utility makedumpfile the randomized base address of these memory sections when mm KASLR is enabled. However, the following patch, "kexec: export the value of phys_base instead of symbol address", means makedumpfile no longer needs these addresses. Besides, we should use VMCOREINFO_NUMBER to export the value of the variable so that we can use makedumpfile's existing number_table mechanism to fetch it. So revert it now; if needed, we can add it back later. http://lists.infradead.org/pipermail/kexec/2016-October/017540.html Link: http://lkml.kernel.org/r/1478568596-30060-1-git-send-email-bhe@redhat.com Signed-off-by: Baoquan He Cc: Thomas Garnier Cc: Baoquan He Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H . Peter Anvin" Cc: Eric Biederman Cc: Xunlei Pang Cc: HATAYAMA Daisuke Cc: Kees Cook Cc: Eugene Surovegin Cc: Dave Young Cc: AKASHI Takahiro Cc: Atsushi Kumagai Cc: Dave Anderson Cc: Pratyush Anand Cc: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/machine_kexec_64.c | 3 --- include/linux/kexec.h | 6 ------ 2 files changed, 9 deletions(-) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 8c1f218926d7..5a294e48b185 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -337,9 +337,6 @@ void arch_crash_save_vmcoreinfo(void) #endif vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); - VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET); - VMCOREINFO_VMALLOC_START(VMALLOC_START); - VMCOREINFO_VMEMMAP_START(VMEMMAP_START); } /* arch-dependent functionality related to kexec file-based syscall */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 406c33dcae13..d7437777baaa 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -259,12 +259,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name) #define VMCOREINFO_CONFIG(name) \ vmcoreinfo_append_str("CONFIG_%s=y\n", #name) -#define VMCOREINFO_PAGE_OFFSET(value) \ - vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMALLOC_START(value) \ - vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value) -#define VMCOREINFO_VMEMMAP_START(value) \ - vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value) extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; -- cgit v1.2.3 From 401721ecd1dcb0a428aa5d6832ee05ffbdbffbbe Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Dec 2016 15:04:20 -0800 Subject: kexec: export the value of phys_base instead of symbol address Currently on x86_64, the symbol address of phys_base is exported to vmcoreinfo. Dave Anderson complained that this is really useless for his Crash implementation.
Because in the user-space utilities Crash and makedumpfile, which the exported vmcore information is mainly used by, the value of phys_base is needed to convert the virtual address of an exported kernel symbol to a physical address. In particular for init_level4_pgt: if we want to access and walk the page table to look up the PA corresponding to a VA, we first need to calculate page_dir = SYMBOL(init_level4_pgt) - __START_KERNEL_map + phys_base; Now in Crash and makedumpfile we have to analyze the vmcore ELF program headers to get the value of phys_base. As Dave said, it would be preferable if it were readily available in vmcoreinfo rather than depending upon the PT_LOAD semantics. Hence this patch changes to exporting the value of phys_base instead of its virtual address. People have also complained that the KERNEL_IMAGE_SIZE export is x86_64-only and should be moved into the arch-dependent function arch_crash_save_vmcoreinfo(); do that move in this patch as well. Link: http://lkml.kernel.org/r/1478568596-30060-2-git-send-email-bhe@redhat.com Signed-off-by: Baoquan He Cc: Thomas Garnier Cc: Baoquan He Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H . Peter Anvin" Cc: Eric Biederman Cc: Xunlei Pang Cc: HATAYAMA Daisuke Cc: Kees Cook Cc: Eugene Surovegin Cc: Dave Young Cc: AKASHI Takahiro Cc: Atsushi Kumagai Cc: Dave Anderson Cc: Pratyush Anand Cc: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/machine_kexec_64.c | 3 ++- kernel/kexec_core.c | 3 --- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 5a294e48b185..307b1f4543de 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -328,7 +328,7 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { - VMCOREINFO_SYMBOL(phys_base); + VMCOREINFO_NUMBER(phys_base); VMCOREINFO_SYMBOL(init_level4_pgt); #ifdef CONFIG_NUMA @@ -337,6 +337,7 @@ void arch_crash_save_vmcoreinfo(void) #endif vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); + VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); } /* arch-dependent functionality related to kexec file-based syscall */ diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 561675589511..8ad3a29eb728 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -1467,9 +1467,6 @@ static int __init crash_save_vmcoreinfo_init(void) #endif VMCOREINFO_NUMBER(PG_head_mask); VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); -#ifdef CONFIG_X86 - VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); -#endif #ifdef CONFIG_HUGETLB_PAGE VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR); #endif -- cgit v1.2.3 From 8e53c073a41795365eb0e47ae23441dc01c70511 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Wed, 14 Dec 2016 15:04:23 -0800 Subject: kexec: add cond_resched into kimage_alloc_crash_control_pages A soft lockup will occur when I run trinity on the kexec_load syscall. The corresponding stack information is as follows.
BUG: soft lockup - CPU#6 stuck for 22s! [trinity-c6:13859]
Kernel panic - not syncing: softlockup: hung tasks
CPU: 6 PID: 13859 Comm: trinity-c6 Tainted: G O L ----V------- 3.10.0-327.28.3.35.zhongjiang.x86_64 #1
Hardware name: Huawei Technologies Co., Ltd. Tecal BH622 V2/BC01SRSA0, BIOS RMIBV386 06/30/2014
Call Trace:
 dump_stack+0x19/0x1b
 panic+0xd8/0x214
 watchdog_timer_fn+0x1cc/0x1e0
 __hrtimer_run_queues+0xd2/0x260
 hrtimer_interrupt+0xb0/0x1e0
 ? call_softirq+0x1c/0x30
 local_apic_timer_interrupt+0x37/0x60
 smp_apic_timer_interrupt+0x3f/0x60
 apic_timer_interrupt+0x6d/0x80
 ? kimage_alloc_control_pages+0x80/0x270
 ? kmem_cache_alloc_trace+0x1ce/0x1f0
 ? do_kimage_alloc_init+0x1f/0x90
 kimage_alloc_init+0x12a/0x180
 SyS_kexec_load+0x20a/0x260
 system_call_fastpath+0x16/0x1b
The first allocation of control pages may take too much time because crashk_res.end can be set to a high value. We need to add cond_resched() to avoid the issue. The patch has been tested, and the above issue no longer appears. Link: http://lkml.kernel.org/r/1481164674-42775-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhong jiang Acked-by: "Eric W. Biederman" Cc: Xunlei Pang Cc: Dave Young Cc: Vivek Goyal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 8ad3a29eb728..5617cc412444 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -441,6 +441,8 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image, while (hole_end <= crashk_res.end) { unsigned long i; + cond_resched(); + if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT) break; /* See if I overlap any of the segments */ -- cgit v1.2.3 From 7560ef39dc0bfebba8f43766d8bb4c1ec5eb66fc Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 14 Dec 2016 15:04:26 -0800 Subject: sysctl: add KERN_CONT to deprecated_sysctl_warning() Do not break lines while printk()ing values.
kernel: warning: process `tomoyo_file_tes' used the deprecated sysctl system call with
kernel: 3.
kernel: 5.
kernel: 56.
kernel:
Link: http://lkml.kernel.org/r/1480814833-4976-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Acked-by: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl_binary.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 6eb99c17dbd8..ece4b177052b 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1354,8 +1354,8 @@ static void deprecated_sysctl_warning(const int *name, int nlen) "warning: process `%s' used the deprecated sysctl " "system call with ", current->comm); for (i = 0; i < nlen; i++) - printk("%d.", name[i]); - printk("\n"); + printk(KERN_CONT "%d.", name[i]); + printk(KERN_CONT "\n"); } return; } -- cgit v1.2.3 From 8a3385d2d47cd912f28d23bd2225ee37de518d86 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:29 -0800 Subject: arch/arc: add option to skip sync on DMA mapping Patch series "Add support for DMA writable pages being writable by the network stack", v3. The first 19 patches in the set add support for the DMA attribute DMA_ATTR_SKIP_CPU_SYNC on multiple platforms/architectures. This is needed so that we can flag the calls to dma_map/unmap_page so that we do not invalidate cache lines that do not currently belong to the device. Instead we have to take care of this in the driver via a call to sync_single_range_for_cpu prior to freeing the Rx page. Patch 20 adds support for dma_map_page_attrs and dma_unmap_page_attrs so that we can unmap and map a page using the DMA_ATTR_SKIP_CPU_SYNC attribute. Patch 21 adds support for freeing a page that has multiple references being held by a single caller. This way we can free page fragments that were allocated by a given driver. The last 2 patches use these updates in the igb driver, and lay the groundwork for us to reimplement the use of build_skb.
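Put together, the driver-side pattern the series enables looks roughly like this (a sketch only: dma_map_page_attrs()/dma_unmap_page_attrs() are added by patch 20, the sync helper is the existing DMA API, and dev, page, offset and len stand in for the driver's ring state):

/* Map the Rx page without the implicit CPU sync; the device owns the
 * buffer until a frame arrives. */
dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

/* After the device writes a frame, invalidate only the region it actually
 * touched before the CPU (the network stack) reads it. */
dma_sync_single_range_for_cpu(dev, dma, offset, len, DMA_FROM_DEVICE);

/* On unmap, skip the sync again -- the CPU already owns the data. */
dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
		     DMA_ATTR_SKIP_CPU_SYNC);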
This patch (of 23): This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it later via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113419.76501.38491.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Vineet Gupta Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arc/mm/dma.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index cd8aad8226dd..08450a1a5b5f 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -158,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, unsigned long attrs) { phys_addr_t paddr = page_to_phys(page) + offset; - _dma_cache_sync(paddr, size, dir); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + _dma_cache_sync(paddr, size, dir); + return plat_phys_to_dma(dev, paddr); } -- cgit v1.2.3 From fc1b138de7917d8fda5995aeecdb10c8182d9f75 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:32 -0800 Subject: arch/arm: add option to skip sync on DMA map and unmap The use of DMA_ATTR_SKIP_CPU_SYNC was not consistent across all of the DMA APIs in the arch/arm folder. This change is meant to correct that so that we get consistent behavior. Link: http://lkml.kernel.org/r/20161110113424.76501.2715.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/common/dmabounce.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 301281645d08..75055df1cda3 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) } static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction dir) + enum dma_data_direction dir, + unsigned long attrs) { struct dmabounce_device_info *device_info = dev->archdata.dmabounce; struct safe_buffer *buf; @@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, __func__, buf->ptr, virt_to_dma(dev, buf->ptr), buf->safe, buf->safe_dma_addr); - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { + if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) && + !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", __func__, ptr, buf->safe, size); memcpy(buf->safe, ptr, size); @@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, } static inline void unmap_single(struct device *dev, struct safe_buffer *buf, - size_t size, enum dma_data_direction dir) + size_t size, enum dma_data_direction dir, + unsigned long attrs) { BUG_ON(buf->size != size); BUG_ON(buf->direction != dir); @@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf, DO_STATS(dev->archdata.dmabounce->bounce_count++); - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { + if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) && + !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { void *ptr = buf->ptr; dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", @@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, return DMA_ERROR_CODE; } - return map_single(dev, 
page_address(page) + offset, size, dir); + return map_single(dev, page_address(page) + offset, size, dir, attrs); } /* @@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t return; } - unmap_single(dev, buf, size, dir); + unmap_single(dev, buf, size, dir, attrs); } static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, -- cgit v1.2.3 From e8b4762c22589a514add9bf9dda7a3050e4fa54c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:35 -0800 Subject: arch/avr32: add option to skip sync on DMA map The use of DMA_ATTR_SKIP_CPU_SYNC was not consistent across all of the DMA APIs in the arch/avr32 folder. This change is meant to correct that so that we get consistent behavior. Link: http://lkml.kernel.org/r/20161110113430.76501.79737.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Hans-Christian Noren Egtvedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/avr32/mm/dma-coherent.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 58610d0df7ed..54534e5d0781 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c @@ -146,7 +146,8 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, { void *cpu_addr = page_address(page) + offset; - dma_cache_sync(dev, cpu_addr, size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_cache_sync(dev, cpu_addr, size, direction); return virt_to_bus(cpu_addr); } @@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset; virt = sg_virt(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + dma_cache_sync(dev, virt, sg->length, direction); } -- cgit v1.2.3 From 8c16a2e209d563a1909451c769417a6f953e5b2c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:38 -0800 Subject: arch/blackfin: add option to skip sync on DMA map The use of DMA_ATTR_SKIP_CPU_SYNC was not consistent across all of the DMA APIs in the arch/blackfin folder. This change is meant to correct that so that we get consistent behavior.
Link: http://lkml.kernel.org/r/20161110113436.76501.13386.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Steven Miao Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/blackfin/kernel/dma-mapping.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index 53fbbb61aa86..a27a74a18fb0 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, for_each_sg(sg_list, sg, nents, i) { sg->dma_address = (dma_addr_t) sg_virt(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); } @@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, { dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); - _dma_sync(handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + _dma_sync(handle, size, dir); + return handle; } -- cgit v1.2.3 From 64c596b59c726514f548e15f13a87010efbff40d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:41 -0800 Subject: arch/c6x: add option to skip sync on DMA map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it later via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113442.76501.7673.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Mark Salter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/c6x/kernel/dma.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index db4a6a301f5e..6752df32ef06 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c @@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, { dma_addr_t handle = virt_to_phys(page_address(page) + offset); - c6x_dma_sync(handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + c6x_dma_sync(handle, size, dir); + return handle; } static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - c6x_dma_sync(handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + c6x_dma_sync(handle, size, dir); } static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, @@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); - c6x_dma_sync(sg->dma_address, sg->length, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + c6x_dma_sync(sg->dma_address, sg->length, dir); } return nents; @@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, struct scatterlist *sg; int i; + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return; + for_each_sg(sglist, sg, nents, i) c6x_dma_sync(sg_dma_address(sg), sg->length, dir); - } static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, -- cgit v1.2.3 From 34f8be79a772094a96da83f52f247cff2bc13978 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:43 -0800 Subject: arch/frv: add option to skip sync on DMA map The use of DMA_ATTR_SKIP_CPU_SYNC was not consistent across all of the DMA APIs in the arch/frv folder.
This change is meant to correct that so that we get consistent behavior. Link: http://lkml.kernel.org/r/20161110113447.76501.93160.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/mb93090-mb00/pci-dma-nommu.c | 14 ++++++++++---- arch/frv/mb93090-mb00/pci-dma.c | 9 +++++++-- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 90f2e4cb33d6..187688128c65 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -109,16 +109,19 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, unsigned long attrs) { - int i; struct scatterlist *sg; + int i; + + BUG_ON(direction == DMA_NONE); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return nents; for_each_sg(sglist, sg, nents, i) { frv_cache_wback_inv(sg_dma_address(sg), sg_dma_address(sg) + sg_dma_len(sg)); } - BUG_ON(direction == DMA_NONE); - return nents; } @@ -127,7 +130,10 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, enum dma_data_direction direction, unsigned long attrs) { BUG_ON(direction == DMA_NONE); - flush_dcache_page(page); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + flush_dcache_page(page); + return (dma_addr_t) page_to_phys(page) + offset; } diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index f585745b1abc..dba7df918144 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -40,13 +40,16 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, unsigned long attrs) { + struct scatterlist *sg; unsigned long dampr2; void *vaddr; int i; - struct scatterlist *sg; BUG_ON(direction == DMA_NONE); + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return nents; + dampr2 = __get_DAMPR(2); for_each_sg(sglist, sg, nents, i) { @@ -70,7 +73,9 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { - flush_dcache_page(page); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + flush_dcache_page(page); + return (dma_addr_t) page_to_phys(page) + offset; } -- cgit v1.2.3 From b8a346dd472af45e1ee8cbc77101dad0b836210b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:46 -0800 Subject: arch/hexagon: Add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it later via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113452.76501.45864.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Richard Kuo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/hexagon/kernel/dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index b9017785fb71..dbc4f1003da4 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -119,6 +119,9 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, s->dma_length = s->length; + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + flush_dcache_range(dma_addr_to_virt(s->dma_address), dma_addr_to_virt(s->dma_address + s->length)); } @@ -180,7 +183,8 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, if (!check_addr("map_single", dev, bus, size)) return bad_dma_address; - dma_sync(dma_addr_to_virt(bus), size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_sync(dma_addr_to_virt(bus), size, dir); return bus; } -- cgit v1.2.3 From 5140d2344f1036dadffbb949a60af971b6dde0d3 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:49 -0800 Subject: arch/m68k: add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it later via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113457.76501.77603.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m68k/kernel/dma.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index 8cf97cbadc91..07070065a425 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, { dma_addr_t handle = page_to_phys(page) + offset; - dma_sync_single_for_device(dev, handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_sync_single_for_device(dev, handle, size, dir); + return handle; } @@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); } -- cgit v1.2.3 From 38bdbdc7e391f5326a9f27f0bc8be0c175a73567 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:52 -0800 Subject: arch/metag: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113503.76501.80809.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: James Hogan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/metag/kernel/dma.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index 0db31e24c541..91968d92652b 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -484,8 +484,9 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { - dma_sync_for_device((void *)(page_to_phys(page) + offset), size, - direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_sync_for_device((void *)(page_to_phys(page) + offset), + size, direction); return page_to_phys(page) + offset; } @@ -493,7 +494,8 @@ static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, unsigned long attrs) { - dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); } static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, @@ -507,6 +509,10 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, BUG_ON(!sg_page(sg)); sg->dma_address = sg_phys(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + dma_sync_for_device(sg_virt(sg), sg->length, direction); } @@ -525,6 +531,10 @@ static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, BUG_ON(!sg_page(sg)); sg->dma_address = sg_phys(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + dma_sync_for_cpu(sg_virt(sg), sg->length, direction); } } -- cgit v1.2.3 From 98ac2fc274e07a22bea43682cf8998f47ffea9d0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:55 -0800 Subject: arch/microblaze: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113508.76501.77583.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/microblaze/kernel/dma.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index ec04dc1e2527..818daf230eb4 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -61,6 +61,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, /* FIXME this part of code is untested */ for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, sg->length, direction); } @@ -80,7 +84,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, enum dma_data_direction direction, unsigned long attrs) { - __dma_sync(page_to_phys(page) + offset, size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync(page_to_phys(page) + offset, size, direction); return page_to_phys(page) + offset; } @@ -95,7 +100,8 @@ static inline void dma_direct_unmap_page(struct device *dev, * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and * dma_address is physical address */ - __dma_sync(dma_address, size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync(dma_address, size, direction); } static inline void -- cgit v1.2.3 From 9f318d470e37a470d65449f94a06165ea72dbce1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:04:58 -0800 Subject: arch/mips: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113513.76501.32321.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Ralf Baechle Cc: Keguang Zhang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/loongson64/common/dma-swiotlb.c | 2 +- arch/mips/mm/dma-default.c | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 1a80b6f73ab2..aab4fd681e1f 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0); + int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs); mb(); return r; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 46d5696c4f27..a39c36af97ad 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page, static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, unsigned long attrs) { - if (cpu_needs_post_dma_flush(dev)) + if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync(dma_addr_to_page(dev, dma_addr), dma_addr & ~PAGE_MASK, size, direction); plat_post_dma_flush(dev); @@ -307,7 +307,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, struct scatterlist *sg; for_each_sg(sglist, sg, nents, i) { - if (!plat_device_is_coherent(dev)) + if (!plat_device_is_coherent(dev) && + !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); #ifdef CONFIG_NEED_SG_DMA_LENGTH @@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { - if (!plat_device_is_coherent(dev)) + if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) __dma_sync(page, offset, size, direction); return plat_map_dma_mem_page(dev, page) + offset; @@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, for_each_sg(sglist, sg, nhwentries, i) { if (!plat_device_is_coherent(dev) && + !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && direction != DMA_TO_DEVICE) __dma_sync(sg_page(sg), sg->offset, sg->length, direction); -- cgit v1.2.3 From abdf4799dac6210e3d09d368c4ed19e984c45dce Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:00 -0800 Subject: arch/nios2: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113518.76501.52225.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Reviewed-by: Tobias Klauser Cc: Ley Foon Tan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/nios2/mm/dma-mapping.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index d800fad87896..f6a5dcf9d682 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -98,13 +98,17 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, sg, nents, i) { - void *addr; + void *addr = sg_virt(sg); - addr = sg_virt(sg); - if (addr) { - __dma_sync_for_device(addr, sg->length, direction); - sg->dma_address = sg_phys(sg); - } + if (!addr) + continue; + + sg->dma_address = sg_phys(sg); + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + + __dma_sync_for_device(addr, sg->length, direction); } return nents; @@ -117,7 +121,9 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, { void *addr = page_address(page) + offset; - __dma_sync_for_device(addr, size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync_for_device(addr, size, direction); + return page_to_phys(page) + offset; } @@ -125,7 +131,8 @@ static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, unsigned long attrs) { - __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); } static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, @@ -138,6 +145,9 @@ static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, if (direction == DMA_TO_DEVICE) return; + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return; + for_each_sg(sg, sg, nhwentries, i) { addr = sg_virt(sg); if (addr) -- cgit v1.2.3 From 043b42bcbbc6bc87c457b156041d74d51f6cebc2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:03 -0800 Subject: arch/openrisc: add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113524.76501.87966.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Jonas Bonn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/openrisc/kernel/dma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 140c99140649..906998bac957 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -141,6 +141,9 @@ or1k_map_page(struct device *dev, struct page *page, unsigned long cl; dma_addr_t addr = page_to_phys(page) + offset; + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return addr; + switch (dir) { case DMA_TO_DEVICE: /* Flush the dcache for the requested range */ -- cgit v1.2.3 From f50a2bd298b47e1957f67f794aed90f2c3bd9a10 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:06 -0800 Subject: arch/parisc: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113529.76501.44762.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: "James E.J. Bottomley" Cc: Helge Deller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/parisc/kernel/pci-dma.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 494ff6e8c88a..b6298a85e8ae 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, void *addr = page_address(page) + offset; BUG_ON(direction == DMA_NONE); - flush_kernel_dcache_range((unsigned long) addr, size); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + flush_kernel_dcache_range((unsigned long) addr, size); + return virt_to_phys(addr); } @@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, { BUG_ON(direction == DMA_NONE); + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return; + if (direction == DMA_TO_DEVICE) - return; + return; /* * For PCI_DMA_FROMDEVICE this flush is not necessary for the @@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, */ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size); - return; } static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, @@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr); sg_dma_len(sg) = sg->length; + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + flush_kernel_dcache_range(vaddr, sg->length); } return nents; @@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, BUG_ON(direction == DMA_NONE); + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return; + if (direction == DMA_TO_DEVICE) - return; + return; /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */ for_each_sg(sglist, sg, nents, i) flush_kernel_vmap_range(sg_virt(sg), sg->length); - return; } static void pa11_dma_sync_single_for_cpu(struct device *dev, -- cgit v1.2.3 From 6f774809612d1f1fc800b452ee1ef447a277ec9d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:09 -0800 Subject: arch/powerpc: add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113534.76501.86492.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/dma.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index e64a6016fba7..6877e3fa95bb 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -203,6 +203,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg) + get_dma_offset(dev); sg->dma_length = sg->length; + + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); } @@ -235,7 +239,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, unsigned long attrs) { BUG_ON(dir == DMA_NONE); - __dma_sync_page(page, offset, size, dir); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_sync_page(page, offset, size, dir); + return page_to_phys(page) + offset + get_dma_offset(dev); } -- cgit v1.2.3 From a08120017d7d659719d446fb036d500fa9b1e6f9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:12 -0800 Subject: arch/sh: add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113539.76501.6539.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/kernel/dma-nommu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index eadb669a7329..47fee3b6e29c 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -18,7 +18,9 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, dma_addr_t addr = page_to_phys(page) + offset; WARN_ON(size == 0); - dma_cache_sync(dev, page_address(page) + offset, size, dir); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_cache_sync(dev, page_address(page) + offset, size, dir); return addr; } @@ -35,7 +37,8 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, for_each_sg(sg, s, nents, i) { BUG_ON(!sg_page(s)); - dma_cache_sync(dev, sg_virt(s), s->length, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_cache_sync(dev, sg_virt(s), s->length, dir); s->dma_address = sg_phys(s); s->dma_length = s->length; -- cgit v1.2.3 From 68bbc28f616c18b695a13e59adefb9a1fc0c46a0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:15 -0800 Subject: arch/sparc: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113544.76501.40008.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: "David S. 
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/kernel/iommu.c | 4 ++-- arch/sparc/kernel/ioport.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 852a3291db96..9df997995f6b 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -415,7 +415,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; /* Step 1: Kick data out of streaming buffers if necessary. */ - if (strbuf->strbuf_enabled) + if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); @@ -640,7 +640,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, base = iommu->page_table + entry; dma_handle &= IO_PAGE_MASK; - if (strbuf->strbuf_enabled) + if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) strbuf_flush(strbuf, iommu, dma_handle, ctx, npages, direction); diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2344103414d1..6ffaec44931a 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -527,7 +527,7 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page, static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, enum dma_data_direction dir, unsigned long attrs) { - if (dir != PCI_DMA_TODEVICE) + if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) dma_make_coherent(ba, PAGE_ALIGN(size)); } @@ -572,7 +572,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, struct scatterlist *sg; int n; - if (dir != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { for_each_sg(sgl, sg, nents, n) { dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); } -- cgit v1.2.3 From 33c77e53d838dc27e9d2edec5b6ab6a11861fb32 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:18 -0800 Subject: arch/tile: add option to skip DMA sync as a part of map and unmap This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
Link: http://lkml.kernel.org/r/20161110113550.76501.73060.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Chris Metcalf Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/tile/kernel/pci-dma.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 09bb774b39cd..24e0f8c21f2f 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); - __dma_prep_pa_range(sg->dma_address, sg->length, direction); #ifdef CONFIG_NEED_SG_DMA_LENGTH sg->dma_length = sg->length; #endif + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; + __dma_prep_pa_range(sg->dma_address, sg->length, direction); } return nents; @@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, BUG_ON(!valid_dma_direction(direction)); for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + continue; __dma_complete_pa_range(sg->dma_address, sg->length, direction); } @@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, BUG_ON(!valid_dma_direction(direction)); BUG_ON(offset + size > PAGE_SIZE); - __dma_prep_page(page, offset, size, direction); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + __dma_prep_page(page, offset, size, direction); return page_to_pa(page) + offset; } @@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, { BUG_ON(!valid_dma_direction(direction)); + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) + return; + __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), dma_address & (PAGE_SIZE - 1), size, direction); } -- cgit v1.2.3 From 4bfa135abec99f4b35b5636f56a6db75ce3c64d7 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:21 -0800 Subject: arch/xtensa: add option to skip DMA sync as a part of mapping This change allows us to pass DMA_ATTR_SKIP_CPU_SYNC which allows us to avoid invoking cache line invalidation if the driver will just handle it via a sync_for_cpu or sync_for_device call. 
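The scatterlist paths in these patches apply the equivalent guard per entry; roughly (again a sketch, with arch_sync_page() as the hypothetical stand-in for the per-arch sync helper):

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		sg->dma_length = sg->length;

		/* Skip the per-entry cache maintenance when requested. */
		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		arch_sync_page(sg_page(sg), sg->offset, sg->length, dir);
	}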
Link: http://lkml.kernel.org/r/20161110113555.76501.52536.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/xtensa/kernel/pci-dma.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 1e68806d6695..6a16decf278f 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -189,7 +189,9 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, { dma_addr_t dma_handle = page_to_phys(page) + offset; - xtensa_sync_single_for_device(dev, dma_handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + xtensa_sync_single_for_device(dev, dma_handle, size, dir); + return dma_handle; } @@ -197,7 +199,8 @@ static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); } static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, -- cgit v1.2.3 From 0495c3d367944e4af053983ff3cdf256b567b053 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:23 -0800 Subject: dma: add calls for dma_map_page_attrs and dma_unmap_page_attrs Add support for mapping and unmapping a page with attributes. The primary use for this is currently to allow for us to pass the DMA_ATTR_SKIP_CPU_SYNC attribute when mapping and unmapping a page. On some architectures such as ARM the synchronization has significant overhead and if we are already taking care of the sync_for_cpu and sync_for_device from the driver there isn't much need to handle this in the map/unmap calls as well. 
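As a rough sketch of the intended driver-side usage (assuming a receive buffer where the device writes pkt_len bytes into a mapped page; error handling abbreviated):

	dma_addr_t dma;

	/* Map without the implicit CPU sync... */
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ...and later sync only the bytes the CPU will actually read. */
	dma_sync_single_range_for_cpu(dev, dma, 0, pkt_len, DMA_FROM_DEVICE);

This mirrors what the igb conversion later in this series does.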
Link: http://lkml.kernel.org/r/20161110113601.76501.46095.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/dma-mapping.h | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 08528afdf58b..10c5a17b1f51 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg ops->unmap_sg(dev, sg, nents, dir, attrs); } -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, 0); + addr = ops->map_page(dev, page, offset, size, dir, attrs); debug_dma_map_page(dev, page, offset, size, dir, addr, false); return addr; } -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) +static inline void dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, 0); + ops->unmap_page(dev, addr, size, dir, attrs); debug_dma_unmap_page(dev, addr, size, dir, false); } @@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) +#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) +#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); -- cgit v1.2.3 From 44fdffd70504c15b617686753dfdf9eb0ddf3729 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:26 -0800 Subject: mm: add support for releasing multiple instances of a page Add a function that allows us to batch free a page that has multiple references outstanding. Specifically this function can be used to drop a page being used in the page frag alloc cache. With this drivers can make use of functionality similar to the page frag alloc cache without having to do any workarounds for the fact that there is no function that frees multiple references. Link: http://lkml.kernel.org/r/20161110113606.76501.70752.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Cc: "David S. Miller" Cc: "James E.J. 
Bottomley" Cc: Chris Metcalf Cc: David Howells Cc: Geert Uytterhoeven Cc: Hans-Christian Noren Egtvedt Cc: Helge Deller Cc: James Hogan Cc: Jeff Kirsher Cc: Jonas Bonn Cc: Keguang Zhang Cc: Ley Foon Tan Cc: Mark Salter Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Ralf Baechle Cc: Rich Felker Cc: Richard Kuo Cc: Russell King Cc: Steven Miao Cc: Tobias Klauser Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 ++ mm/page_alloc.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f8041f9de31e..4175dca4ac39 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold); extern void free_hot_cold_page_list(struct list_head *list, bool cold); struct page_frag_cache; +extern void __page_frag_drain(struct page *page, unsigned int order, + unsigned int count); extern void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void __free_page_frag(void *addr); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f64e7bcb43b7..2c6d5f64feca 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3925,6 +3925,20 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc, return page; } +void __page_frag_drain(struct page *page, unsigned int order, + unsigned int count) +{ + VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); + + if (page_ref_sub_and_test(page, count)) { + if (order == 0) + free_hot_cold_page(page, false); + else + __free_pages_ok(page, order); + } +} +EXPORT_SYMBOL(__page_frag_drain); + void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask) { -- cgit v1.2.3 From 5be5955425c22df5e25ee0628b57f523437af6dc Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:30 -0800 Subject: igb: update driver to make use of DMA_ATTR_SKIP_CPU_SYNC The ARM architecture provides a mechanism for deferring cache line invalidation in the case of map/unmap. This patch makes use of this mechanism to avoid unnecessary synchronization. A secondary effect of this change is that the portion of the page that has been synchronized for use by the CPU should be writable and could be passed up the stack (at least on ARM). The last bit that occurred to me is that on architectures where the sync_for_cpu call invalidates cache lines we were prefetching and then invalidating the first 128 bytes of the packet. To avoid that I have moved the sync up to before we perform the prefetch and allocate the skbuff so that we can actually make use of it. Link: http://lkml.kernel.org/r/20161110113611.76501.98897.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Jeff Kirsher Cc: "David S. Miller" Cc: "James E.J. 
Bottomley" Cc: Chris Metcalf Cc: David Howells Cc: Geert Uytterhoeven Cc: Hans-Christian Noren Egtvedt Cc: Helge Deller Cc: James Hogan Cc: Jonas Bonn Cc: Keguang Zhang Cc: Ley Foon Tan Cc: Mark Salter Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Ralf Baechle Cc: Rich Felker Cc: Richard Kuo Cc: Russell King Cc: Steven Miao Cc: Tobias Klauser Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/ethernet/intel/igb/igb_main.c | 53 +++++++++++++++++++------------ 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cae24a8ccf47..60f122531e29 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3947,10 +3947,21 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) if (!buffer_info->page) continue; - dma_unmap_page(rx_ring->dev, - buffer_info->dma, - PAGE_SIZE, - DMA_FROM_DEVICE); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); __free_page(buffer_info->page); buffer_info->page = NULL; @@ -6812,12 +6823,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, /* transfer page from old buffer to new buffer */ *new_buff = *old_buff; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, - old_buff->page_offset, - IGB_RX_BUFSZ, - DMA_FROM_DEVICE); } static inline bool igb_page_is_reserved(struct page *page) @@ -6938,6 +6943,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, page = rx_buffer->page; prefetchw(page); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + if (likely(!skb)) { void *page_addr = page_address(page) + rx_buffer->page_offset; @@ -6962,21 +6974,15 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, prefetchw(skb->data); } - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - /* pull page into skb */ if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } /* clear contents of rx_buffer */ @@ -7234,7 +7240,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@ -7275,6 +7282,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) if (!igb_alloc_mapped_page(rx_ring, bi)) break; + /* 
sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ -- cgit v1.2.3 From bd4171a5d4c2827f4ae7a235389648d7c149a41b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 14 Dec 2016 15:05:34 -0800 Subject: igb: update code to better handle incrementing page count Update the driver code so that we do bulk updates of the page reference count instead of just incrementing it by one reference at a time. The advantage to doing this is that we cut down on atomic operations and this in turn should give us a slight improvement in cycles per packet. In addition if we eventually move this over to using build_skb the gains will be more noticeable. Link: http://lkml.kernel.org/r/20161110113616.76501.17072.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck Acked-by: Jeff Kirsher Cc: "David S. Miller" Cc: "James E.J. Bottomley" Cc: Chris Metcalf Cc: David Howells Cc: Geert Uytterhoeven Cc: Hans-Christian Noren Egtvedt Cc: Helge Deller Cc: James Hogan Cc: Jonas Bonn Cc: Keguang Zhang Cc: Ley Foon Tan Cc: Mark Salter Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Ralf Baechle Cc: Rich Felker Cc: Richard Kuo Cc: Russell King Cc: Steven Miao Cc: Tobias Klauser Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/ethernet/intel/igb/igb.h | 7 ++++++- drivers/net/ethernet/intel/igb/igb_main.c | 24 +++++++++++++++++------- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d11093dce1b9..acbc3abe2ddd 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -210,7 +210,12 @@ struct igb_tx_buffer { struct igb_rx_buffer { dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct igb_tx_queue_stats { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 60f122531e29..a761001308dc 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3962,7 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - __free_page(buffer_info->page); + __page_frag_drain(buffer_info->page, 0, + buffer_info->pagecnt_bias); buffer_info->page = NULL; } @@ -6834,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, struct page *page, unsigned int truesize) { + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; + /* avoid re-using remote pages */ if (unlikely(igb_page_is_reserved(page))) return false; #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_ref_count(page) != pagecnt_bias)) return false; /* flip page offset to other buffer */ @@ -6853,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, return false; #endif - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. 
+ /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. */ - page_ref_inc(page); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } return true; } @@ -6908,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return true; /* this page cannot be reused so discard it */ - __free_page(page); return false; } @@ -6979,10 +6985,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { - /* we are not reusing the buffer so unmap it */ + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + __page_frag_drain(page, 0, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ @@ -7256,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = 0; + bi->pagecnt_bias = 1; return true; } -- cgit v1.2.3 From 9a29d0fbc2d9ad99fb8a981ab72548cc360e9d4c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Dec 2016 15:05:38 -0800 Subject: relay: check array offset before using it Smatch complains that we started using the array offset before we checked that it was valid. Fixes: 017c59c042d0 ('relay: Use per CPU constructs for the relay channel buffer pointers') Link: http://lkml.kernel.org/r/20161013084947.GC16198@mwanda Signed-off-by: Dan Carpenter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/relay.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/relay.c b/kernel/relay.c index da79a109dbeb..8f18d314a96a 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -809,11 +809,11 @@ void relay_subbufs_consumed(struct rchan *chan, { struct rchan_buf *buf; - if (!chan) + if (!chan || cpu >= NR_CPUS) return; buf = *per_cpu_ptr(chan->buf, cpu); - if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs) + if (!buf || subbufs_consumed > chan->n_subbufs) return; if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) -- cgit v1.2.3 From 700199b0c192c7243539757177782780f5a45afb Mon Sep 17 00:00:00 2001 From: Andreas Platschek Date: Wed, 14 Dec 2016 15:05:40 -0800 Subject: Kconfig: lib/Kconfig.debug: fix references to Documentation Documentation on development tools was moved to Documentation/dev-tools and sphinxified (renamed from .txt to .rst). References in lib/Kconfig.debug need to be updated to the new location. Link: http://lkml.kernel.org/r/1476698152-29340-2-git-send-email-andreas.platschek@opentech.at Signed-off-by: Andreas Platschek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e6327d102184..7446097f72bd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -194,8 +194,8 @@ config GDB_SCRIPTS build directory. If you load vmlinux into gdb, the helper scripts will be automatically imported by gdb as well, and additional functions are available to analyze a Linux kernel - instance. See Documentation/gdb-kernel-debugging.txt for further - details. + instance. See Documentation/dev-tools/gdb-kernel-debugging.rst + for further details.
config ENABLE_WARN_DEPRECATED bool "Enable __deprecated logic" @@ -542,7 +542,7 @@ config DEBUG_KMEMLEAK difference being that the orphan objects are not freed but only shown in /sys/kernel/debug/kmemleak. Enabling this feature will introduce an overhead to memory - allocations. See Documentation/kmemleak.txt for more + allocations. See Documentation/dev-tools/kmemleak.rst for more details. Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances @@ -739,7 +739,7 @@ config KCOV different machines and across reboots. If you need stable PC values, disable RANDOMIZE_BASE. - For more details, see Documentation/kcov.txt. + For more details, see Documentation/dev-tools/kcov.rst. config KCOV_INSTRUMENT_ALL bool "Instrument all code by default" -- cgit v1.2.3 From 0462554707d62d53fc400ecb3cc19d7f9c786b95 Mon Sep 17 00:00:00 2001 From: Andreas Platschek Date: Wed, 14 Dec 2016 15:05:43 -0800 Subject: Kconfig: lib/Kconfig.ubsan fix reference to ubsan documentation Documentation/ubsan.txt was moved to Documentation/dev-tools/ubsan.rst, this fixes the reference. Link: http://lkml.kernel.org/r/1476698152-29340-3-git-send-email-andreas.platschek@opentech.at Signed-off-by: Andreas Platschek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.ubsan | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index bc6e651df68c..a669c193b878 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -10,7 +10,8 @@ config UBSAN This option enables undefined behaviour sanity checker Compile-time instrumentation is used to detect various undefined behaviours in runtime. Various types of checks may be enabled - via boot parameter ubsan_handle (see: Documentation/ubsan.txt). + via boot parameter ubsan_handle + (see: Documentation/dev-tools/ubsan.rst). config UBSAN_SANITIZE_ALL bool "Enable instrumentation for the entire kernel" -- cgit v1.2.3 From db862358a4a96f52d3b0c713c703828f90d97de9 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 14 Dec 2016 15:05:46 -0800 Subject: kcov: add more missing includes It is fragile that some definitions are acquired via transitive dependencies, as shown below:

 atomic_* (<linux/atomic.h>)
 ENOMEM/EN* (<linux/errno.h>)
 EXPORT_SYMBOL (<linux/export.h>)
 device_initcall (<linux/init.h>)
 preempt_* (<linux/preempt.h>)

Include them to prevent possible issues. Link: http://lkml.kernel.org/r/1481163221-40170-1-git-send-email-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Suggested-by: Mark Rutland Cc: Dmitry Vyukov Cc: Andrey Ryabinin Cc: Mark Rutland Cc: James Morse Cc: Kefeng Wang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kcov.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/kcov.c b/kernel/kcov.c index 3cbb0c879705..cc2fa35ca480 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -1,11 +1,16 @@ #define pr_fmt(fmt) "kcov: " fmt #define DISABLE_BRANCH_PROFILING +#include <linux/atomic.h> #include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/export.h> #include <linux/types.h> #include <linux/file.h> #include <linux/fs.h> +#include <linux/init.h> #include <linux/mm.h> +#include <linux/preempt.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/spinlock.h> -- cgit v1.2.3 From 2d13bb6494c807bcf3f78af0e96c0b8615a94385 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 14 Dec 2016 15:05:49 -0800 Subject: kernel/debug/debug_core.c: more properly delay for secondary CPUs We've got a delay loop waiting for secondary CPUs. That loop uses loops_per_jiffy. However, loops_per_jiffy doesn't actually mean how many tight loops make up a jiffy on all architectures. It is quite common to see things like this in the boot log: Calibrating delay loop (skipped), value calculated using timer frequency..
48.00 BogoMIPS (lpj=24000) In my case I was seeing lots of cases where other CPUs timed out entering the debugger only to print their stack crawls shortly after the kdb> prompt was written. Elsewhere in kgdb we already use udelay(), so that should be safe enough to use to implement our timeout. We'll delay 1 ms for 1000 times, which should give us a full second of delay (just like the old code wanted) but allow us to notice that we're done every 1 ms. [akpm@linux-foundation.org: simplifications, per Daniel] Link: http://lkml.kernel.org/r/1477091361-2039-1-git-send-email-dianders@chromium.org Signed-off-by: Douglas Anderson Reviewed-by: Daniel Thompson Cc: Jason Wessel Cc: Brian Norris Cc: [4.0+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/debug/debug_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 0874e2edd275..79517e5549f1 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -598,11 +598,11 @@ return_normal: /* * Wait for the other CPUs to be notified and be waiting for us: */ - time_left = loops_per_jiffy * HZ; + time_left = MSEC_PER_SEC; while (kgdb_do_roundup && --time_left && (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != online_cpus) - cpu_relax(); + udelay(1000); if (!time_left) pr_crit("Timed out waiting for secondary CPUs.\n"); -- cgit v1.2.3 From d1bd8ead126668a2d6c42d97cc3664e95b3fa1dc Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 14 Dec 2016 15:05:52 -0800 Subject: kdb: remove unused kdb_event handling kdb_event state variable is only set but never checked in the kernel code. http://www.spinics.net/lists/kdb/msg01733.html suggests that this variable affected WARN_CONSOLE_UNLOCKED() in the original implementation. But this check never went upstream. The semantic is unclear and racy. The value is updated after the kdb_printf_lock is acquired and after it is released. It should be symmetric at minimum. The value should be manipulated either inside or outside the locked area. Fortunately, it seems that the original function is gone and we could simply remove the state variable. Link: http://lkml.kernel.org/r/1480412276-16690-2-git-send-email-pmladek@suse.com Signed-off-by: Petr Mladek Suggested-by: Daniel Thompson Cc: Jason Wessel Cc: Peter Zijlstra Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kdb.h | 1 - kernel/debug/kdb/kdb_io.c | 2 -- kernel/debug/kdb/kdb_main.c | 1 - 3 files changed, 4 deletions(-) diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 410decacff8f..eb706188dc23 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -77,7 +77,6 @@ extern int kdb_poll_idx; * number whenever the kernel debugger is entered. 
*/ extern int kdb_initial_cpu; -extern atomic_t kdb_event; /* Types and messages used for dynamically added kdb shell commands */ diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 98c9011eac78..46f477bebe0c 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -576,7 +576,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) KDB_STATE_SET(PRINTF_LOCK); spin_lock_irqsave(&kdb_printf_lock, flags); got_printf_lock = 1; - atomic_inc(&kdb_event); } else { __acquire(kdb_printf_lock); } @@ -851,7 +850,6 @@ kdb_print_out: got_printf_lock = 0; spin_unlock_irqrestore(&kdb_printf_lock, flags); KDB_STATE_CLEAR(PRINTF_LOCK); - atomic_dec(&kdb_event); } else { __release(kdb_printf_lock); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 2a20c0dfdafc..ca183919d302 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -60,7 +60,6 @@ int kdb_grep_trailing; * Kernel debugger state flags */ int kdb_flags; -atomic_t kdb_event; /* * kdb_lock protects updates to kdb_initial_cpu. Used to -- cgit v1.2.3 From d5d8d3d0d4adcc3aec6e2e0fb656165014a712b7 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 14 Dec 2016 15:05:55 -0800 Subject: kdb: properly synchronize vkdb_printf() calls with other CPUs kdb_printf_lock does not prevent other CPUs from entering the critical section because it is ignored when KDB_STATE_PRINTF_LOCK is set. The problematic situation might look like:

CPU0                                    CPU1

vkdb_printf()
  if (!KDB_STATE(PRINTF_LOCK))
    KDB_STATE_SET(PRINTF_LOCK);
    spin_lock_irqsave(&kdb_printf_lock, flags);

                                        vkdb_printf()
                                          if (!KDB_STATE(PRINTF_LOCK))

BANG: The PRINTF_LOCK state is set and CPU1 is entering the critical section without spinning on the lock.

The problem is that the code tries to implement locking using two state variables that are not handled atomically. Well, we need a custom locking because we want to allow reentering the critical section on the very same CPU. Let's use the solution from Peter Zijlstra that was proposed for a similar scenario, see https://lkml.kernel.org/r/20161018171513.734367391@infradead.org This patch uses the same trick with cmpxchg(). The only difference is that we want to handle only recursion from the same context and therefore we disable interrupts. In addition, KDB_STATE_PRINTF_LOCK is removed. In fact, we are not able to set it in a non-racy way.
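Condensed, the locking scheme in the diff below is (a sketch, not the full function): the lock owner is a CPU id, the owning CPU may re-enter, and the release stores back old_cpu, which is -1 only at the outermost level, so a recursive call does not unlock on behalf of its caller:

	this_cpu = smp_processor_id();	/* IRQs off: same CPU => same context */
	for (;;) {
		old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu);
		if (old_cpu == -1 || old_cpu == this_cpu)
			break;		/* acquired, or recursing on owner */
		cpu_relax();
	}

	/* ... emit the message ... */

	smp_store_release(&kdb_printf_cpu, old_cpu);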
Link: http://lkml.kernel.org/r/1480412276-16690-3-git-send-email-pmladek@suse.com Signed-off-by: Petr Mladek Reviewed-by: Daniel Thompson Cc: Jason Wessel Cc: Peter Zijlstra Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/debug/kdb/kdb_io.c | 30 +++++++++++++----------------- kernel/debug/kdb/kdb_private.h | 1 - 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 46f477bebe0c..daa76154fb1b 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -555,16 +555,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) int colcount; int logging, saved_loglevel = 0; int saved_trap_printk; - int got_printf_lock = 0; int retlen = 0; int fnd, len; + int this_cpu, old_cpu; + static int kdb_printf_cpu = -1; char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; char *moreprompt = "more> "; struct console *c = console_drivers; - static DEFINE_SPINLOCK(kdb_printf_lock); unsigned long uninitialized_var(flags); - preempt_disable(); + local_irq_save(flags); saved_trap_printk = kdb_trap_printk; kdb_trap_printk = 0; @@ -572,12 +572,13 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) * But if any cpu goes recursive in kdb, just print the output, * even if it is interleaved with any other text. */ - if (!KDB_STATE(PRINTF_LOCK)) { - KDB_STATE_SET(PRINTF_LOCK); - spin_lock_irqsave(&kdb_printf_lock, flags); - got_printf_lock = 1; - } else { - __acquire(kdb_printf_lock); + this_cpu = smp_processor_id(); + for (;;) { + old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu); + if (old_cpu == -1 || old_cpu == this_cpu) + break; + + cpu_relax(); } diag = kdbgetintenv("LINES", &linecount); @@ -846,15 +847,10 @@ kdb_print_out: suspend_grep = 0; /* end of what may have been a recursive call */ if (logging) console_loglevel = saved_loglevel; - if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) { - got_printf_lock = 0; - spin_unlock_irqrestore(&kdb_printf_lock, flags); - KDB_STATE_CLEAR(PRINTF_LOCK); - } else { - __release(kdb_printf_lock); - } + /* kdb_printf_cpu locked the code above. */ + smp_store_release(&kdb_printf_cpu, old_cpu); kdb_trap_printk = saved_trap_printk; - preempt_enable(); + local_irq_restore(flags); return retlen; } diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 75014d7f4568..fc224fbcf954 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -132,7 +132,6 @@ extern int kdb_state; #define KDB_STATE_PAGER 0x00000400 /* pager is available */ #define KDB_STATE_GO_SWITCH 0x00000800 /* go is switching * back to initial cpu */ -#define KDB_STATE_PRINTF_LOCK 0x00001000 /* Holds kdb_printf lock */ #define KDB_STATE_WAIT_IPI 0x00002000 /* Waiting for kdb_ipi() NMI */ #define KDB_STATE_RECURSE 0x00004000 /* Recursive entry to kdb */ #define KDB_STATE_IP_ADJUSTED 0x00008000 /* Restart IP has been -- cgit v1.2.3 From 34aaff40b42148b23dcde40152480e25c7d2d759 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 14 Dec 2016 15:05:58 -0800 Subject: kdb: call vkdb_printf() from vprintk_default() only when wanted kdb_trap_printk allows to pass normal printk() messages to kdb via vkdb_printk(). For example, it is used to get backtrace using the classic show_stack(), see kdb_show_stack(). vkdb_printf() tries to avoid a potential infinite loop by disabling the trap. 
But this approach is racy, for example:

CPU1                                    CPU2

vkdb_printf()
  // assume that kdb_trap_printk == 0
  saved_trap_printk = kdb_trap_printk;
  kdb_trap_printk = 0;

                                        kdb_show_stack()
                                          kdb_trap_printk++;

Problem1: Now, a nested printk() on CPU1 calls vkdb_printf() even when it should have been disabled. It will not cause a deadlock but...

  // using the outdated saved value: 0
  kdb_trap_printk = saved_trap_printk;

                                        kdb_trap_printk--;

Problem2: Now, kdb_trap_printk == -1 and will stay like this. It means that all messages will get passed to kdb from now on. This patch removes the racy saved_trap_printk handling. Instead, the recursion is prevented by a check for the locked CPU. The solution is still kind of racy. A non-related printk(), from another process, might get trapped by vkdb_printf(). And the wanted printk() might not get trapped because kdb_printf_cpu is assigned. But this problem existed even with the original code. A proper solution would be to get_cpu() before setting kdb_trap_printk and trap messages only from this CPU. I am not sure if it is worth the effort, though. In fact, the race is very theoretical. When kdb is running any of the commands that use kdb_trap_printk there is a single active CPU and the other CPUs should be in a holding pen inside kgdb_cpu_enter(). The only time this is violated is when there is a timeout waiting for the other CPUs to report to the holding pen. Finally, note that the situation is a bit schizophrenic. vkdb_printf() explicitly allows recursion but only from KDB code that calls kdb_printf() directly. On the other hand, the generic printk() recursion is not allowed because it might cause an infinite loop. This is why we could not hide the decision inside vkdb_printf() easily. Link: http://lkml.kernel.org/r/1480412276-16690-4-git-send-email-pmladek@suse.com Signed-off-by: Petr Mladek Cc: Daniel Thompson Cc: Jason Wessel Cc: Peter Zijlstra Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kdb.h | 1 + kernel/debug/kdb/kdb_io.c | 9 ++------- kernel/printk/printk.c | 3 ++- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/include/linux/kdb.h b/include/linux/kdb.h index eb706188dc23..68bd88223417 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -161,6 +161,7 @@ enum kdb_msgsrc { }; extern int kdb_trap_printk; +extern int kdb_printf_cpu; extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index daa76154fb1b..e74be38245ad 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -30,6 +30,7 @@ char kdb_prompt_str[CMD_BUFLEN]; int kdb_trap_printk; +int kdb_printf_cpu = -1; static int kgdb_transition_check(char *buffer) { @@ -554,24 +555,19 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) int linecount; int colcount; int logging, saved_loglevel = 0; - int saved_trap_printk; int retlen = 0; int fnd, len; int this_cpu, old_cpu; - static int kdb_printf_cpu = -1; char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; char *moreprompt = "more> "; struct console *c = console_drivers; unsigned long uninitialized_var(flags); - local_irq_save(flags); - saved_trap_printk = kdb_trap_printk; - kdb_trap_printk = 0; - /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, * even if it is interleaved with any other text.
*/ + local_irq_save(flags); this_cpu = smp_processor_id(); for (;;) { old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu); @@ -849,7 +845,6 @@ kdb_print_out: console_loglevel = saved_loglevel; /* kdb_printf_cpu locked the code above. */ smp_store_release(&kdb_printf_cpu, old_cpu); - kdb_trap_printk = saved_trap_printk; local_irq_restore(flags); return retlen; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 577f2288d19f..a3ce35e0fa1e 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1926,7 +1926,8 @@ int vprintk_default(const char *fmt, va_list args) int r; #ifdef CONFIG_KGDB_KDB - if (unlikely(kdb_trap_printk)) { + /* Allow to pass printk() to kdb but avoid a recursion. */ + if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) { r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); return r; } -- cgit v1.2.3 From 35e669e1a254e8b60d4a8983205b383666cc01ca Mon Sep 17 00:00:00 2001 From: "Francisco Blas Izquierdo Riera (klondike)" Date: Wed, 14 Dec 2016 15:06:01 -0800 Subject: initramfs: select builtin initram compression algorithm on KConfig instead of Makefile Move the current builtin initram compression algorithm selection from the Makefile into the INITRAMFS_COMPRESSION variable. This makes deciding algorithm precedence easier and would allow for overrides if new algorithms want to be tested. Link: http://lkml.kernel.org/r/57EAD769.1090401@klondike.es Signed-off-by: Francisco Blas Izquierdo Riera (klondike) Cc: P J P Cc: Paul Bolle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- usr/Kconfig | 10 ++++++++++ usr/Makefile | 20 +------------------- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/usr/Kconfig b/usr/Kconfig index 572dcf7b6a44..bf8e8f15358f 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -98,3 +98,13 @@ config RD_LZ4 help Support loading of a LZ4 encoded initial ramdisk or cpio buffer If unsure, say N. + +config INITRAMFS_COMPRESSION + string + default ".gz" if RD_GZIP + default ".lz4" if RD_LZ4 + default ".lzo" if RD_LZO + default ".xz" if RD_XZ + default ".lzma" if RD_LZMA + default ".bz2" if RD_BZIP2 + default "" diff --git a/usr/Makefile b/usr/Makefile index e767f019accf..17a513268325 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -5,25 +5,7 @@ klibcdirs:; PHONY += klibcdirs - -# Bzip2 -suffix_$(CONFIG_RD_BZIP2) = .bz2 - -# Lzma -suffix_$(CONFIG_RD_LZMA) = .lzma - -# XZ -suffix_$(CONFIG_RD_XZ) = .xz - -# Lzo -suffix_$(CONFIG_RD_LZO) = .lzo - -# Lz4 -suffix_$(CONFIG_RD_LZ4) = .lz4 - -# Gzip -suffix_$(CONFIG_RD_GZIP) = .gz - +suffix_y = $(CONFIG_INITRAMFS_COMPRESSION) AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)" # Generate builtin.o based on initramfs_data.o -- cgit v1.2.3 From db2aa7fd15e857891cefbada8348c8d938c7a2bc Mon Sep 17 00:00:00 2001 From: "Francisco Blas Izquierdo Riera (klondike)" Date: Wed, 14 Dec 2016 15:06:04 -0800 Subject: initramfs: allow again choice of the embedded initram compression algorithm Choosing the appropriate compression option when using an embedded initramfs can result in significant size differences in the resulting data. This is caused by avoiding double compression of the initramfs contents. For example on my tests, choosing CONFIG_INITRAMFS_COMPRESSION_NONE (when compressing the kernel using XZ) results in up to 500KiB differences (9MiB to 8.5MiB) in the kernel size as the dictionary will not get polluted with uncompressible data and may reuse kernel data too.
Despite embedding an uncompressed initramfs, a user may want to allow for a compressed extra initramfs to be passed using the rd system, for example to boot a recovery system. 9ba4bcb645898d ("initramfs: read CONFIG_RD_ variables for initramfs compression") broke that behavior by making the choice based on CONFIG_RD_* instead of adding CONFIG_INITRAMFS_COMPRESSION_LZ4. Sadly, CONFIG_RD_* is also used to choose the supported RD compression algorithms by the kernel and a user may want to support more than one. This patch also reverts commit 3e4e0f0a875 ("initramfs: remove "compression mode" choice") restoring the "compression mode" choice and includes the CONFIG_INITRAMFS_COMPRESSION_LZ4 option which was never added. As a result the following options are added or re-added affecting the embedded initramfs compression:

 INITRAMFS_COMPRESSION_NONE   Do no compression
 INITRAMFS_COMPRESSION_GZIP   Compress using gzip
 INITRAMFS_COMPRESSION_BZIP2  Compress using bzip2
 INITRAMFS_COMPRESSION_LZMA   Compress using lzma
 INITRAMFS_COMPRESSION_XZ     Compress using xz
 INITRAMFS_COMPRESSION_LZO    Compress using lzo
 INITRAMFS_COMPRESSION_LZ4    Compress using lz4

These depend on the corresponding CONFIG_RD_* option being set (except NONE which has no dependencies). This patch depends on the previous one (the previous version didn't) to simplify the way in which the algorithm is chosen and keep backwards compatibility with the behaviour introduced by 9ba4bcb645898 ("initramfs: read CONFIG_RD_ variables for initramfs compression"). Link: http://lkml.kernel.org/r/57EAD77B.7090607@klondike.es Signed-off-by: Francisco Blas Izquierdo Riera (klondike) Cc: P J P Cc: Paul Bolle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- usr/Kconfig | 117 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) diff --git a/usr/Kconfig b/usr/Kconfig index bf8e8f15358f..6278f135256d 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -99,8 +99,125 @@ config RD_LZ4 Support loading of a LZ4 encoded initial ramdisk or cpio buffer If unsure, say N. +choice + prompt "Built-in initramfs compression mode" + depends on INITRAMFS_SOURCE!="" + optional + help + This option allows you to decide by which algorithm the builtin + initramfs will be compressed. Several compression algorithms are + available, which differ in efficiency, compression and + decompression speed. Compression speed is only relevant + when building a kernel. Decompression speed is relevant at + each boot. Also the memory usage during decompression may become + relevant on memory constrained systems. This is usually based on the + dictionary size of the algorithm with algorithms like XZ and LZMA + featuring large dictionary sizes. + + High compression options are mostly useful for users who are + low on RAM, since it reduces the memory consumption during + boot. + + Keep in mind that your build system needs to provide the appropriate + compression tool to compress the generated initram cpio file for + embedding. + + If in doubt, select 'None' + +config INITRAMFS_COMPRESSION_NONE + bool "None" + help + Do not compress the built-in initramfs at all. This may sound wasteful + in space, but, you should be aware that the built-in initramfs will be + compressed at a later stage anyways along with the rest of the kernel, + on those architectures that support this.
However, not compressing the + initramfs may lead to slightly higher memory consumption during a + short time at boot, while both the cpio image and the unpacked + filesystem image will be present in memory simultaneously + +config INITRAMFS_COMPRESSION_GZIP + bool "Gzip" + depends on RD_GZIP + help + Use the old and well tested gzip compression algorithm. Gzip provides + a good balance between compression ratio and decompression speed and + has a reasonable compression speed. It is also more likely to be + supported by your build system as the gzip tool is present by default + on most distros. + +config INITRAMFS_COMPRESSION_BZIP2 + bool "Bzip2" + depends on RD_BZIP2 + help + It's compression ratio and speed is intermediate. Decompression speed + is slowest among the choices. The initramfs size is about 10% smaller + with bzip2, in comparison to gzip. Bzip2 uses a large amount of + memory. For modern kernels you will need at least 8MB RAM or more for + booting. + + If you choose this, keep in mind that you need to have the bzip2 tool + available to be able to compress the initram. + +config INITRAMFS_COMPRESSION_LZMA + bool "LZMA" + depends on RD_LZMA + help + This algorithm's compression ratio is best but has a large dictionary + size which might cause issues in memory constrained systems. + Decompression speed is between the other choices. Compression is + slowest. The initramfs size is about 33% smaller with LZMA in + comparison to gzip. + + If you choose this, keep in mind that you may need to install the xz + or lzma tools to be able to compress the initram. + +config INITRAMFS_COMPRESSION_XZ + bool "XZ" + depends on RD_XZ + help + XZ uses the LZMA2 algorithm and has a large dictionary which may cause + problems on memory constrained systems. The initramfs size is about + 30% smaller with XZ in comparison to gzip. Decompression speed is + better than that of bzip2 but worse than gzip and LZO. Compression is + slow. + + If you choose this, keep in mind that you may need to install the xz + tool to be able to compress the initram. + +config INITRAMFS_COMPRESSION_LZO + bool "LZO" + depends on RD_LZO + help + It's compression ratio is the second poorest amongst the choices. The + kernel size is about 10% bigger than gzip. Despite that, it's + decompression speed is the second fastest and it's compression speed + is quite fast too. + + If you choose this, keep in mind that you may need to install the lzop + tool to be able to compress the initram. + +config INITRAMFS_COMPRESSION_LZ4 + bool "LZ4" + depends on RD_LZ4 + help + It's compression ratio is the poorest amongst the choices. The kernel + size is about 15% bigger than gzip; however its decompression speed + is the fastest. + + If you choose this, keep in mind that most distros don't provide lz4 + by default which could cause a build failure. 
+ +endchoice + config INITRAMFS_COMPRESSION string + default "" if INITRAMFS_COMPRESSION_NONE + default ".gz" if INITRAMFS_COMPRESSION_GZIP + default ".bz2" if INITRAMFS_COMPRESSION_BZIP2 + default ".lzma" if INITRAMFS_COMPRESSION_LZMA + default ".xz" if INITRAMFS_COMPRESSION_XZ + default ".lzo" if INITRAMFS_COMPRESSION_LZO + default ".lz4" if INITRAMFS_COMPRESSION_LZ4 default ".gz" if RD_GZIP default ".lz4" if RD_LZ4 default ".lzo" if RD_LZO -- cgit v1.2.3 From 999898355e08ae3b92dfd0a08db706e0c6703d30 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 14 Dec 2016 15:06:07 -0800 Subject: ipc: msg, make msgrcv work with LONG_MIN When LONG_MIN is passed to msgrcv, one would expect to receive any message. But convert_mode does *msgtyp = -*msgtyp and -LONG_MIN is undefined. In particular, with my gcc -LONG_MIN produces -LONG_MIN again. So handle this case properly by assigning LONG_MAX to *msgtyp if LONG_MIN was specified as msgtyp to msgrcv. This code:

	long msg[] = { 100, 200 };
	int m = msgget(IPC_PRIVATE, IPC_CREAT | 0644);
	msgsnd(m, &msg, sizeof(msg), 0);
	msgrcv(m, &msg, sizeof(msg), LONG_MIN, 0);

currently produces nothing:

	msgget(IPC_PRIVATE, IPC_CREAT|0644) = 65538
	msgsnd(65538, {100, "\310\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"}, 16, 0) = 0
	msgrcv(65538, ...

Except a UBSAN warning:

	UBSAN: Undefined behaviour in ipc/msg.c:745:13
	negation of -9223372036854775808 cannot be represented in type 'long int':

With the patch, I see what I expect:

	msgget(IPC_PRIVATE, IPC_CREAT|0644) = 0
	msgsnd(0, {100, "\310\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"}, 16, 0) = 0
	msgrcv(0, {100, "\310\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"}, 16, -9223372036854775808, 0) = 16

Link: http://lkml.kernel.org/r/20161024082633.10148-1-jslaby@suse.cz Signed-off-by: Jiri Slaby Cc: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ipc/msg.c b/ipc/msg.c index 32e9bd837cde..e3e52ce01123 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -763,7 +763,10 @@ static inline int convert_mode(long *msgtyp, int msgflg) if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { - *msgtyp = -*msgtyp; + if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */ + *msgtyp = LONG_MAX; + else + *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) -- cgit v1.2.3 From 63980c80e1e99cb324942f3b5e59657025af47c0 Mon Sep 17 00:00:00 2001 From: Shailesh Pandey Date: Wed, 14 Dec 2016 15:06:10 -0800 Subject: ipc/shm.c: coding style fixes This patch fixes the below warnings:

	WARNING: Missing a blank line after declarations
	WARNING: Block comments use a trailing */ on a separate line
	ERROR: spaces required around that '=' (ctx:WxV)

Above warnings were reported by checkpatch.pl Link: http://lkml.kernel.org/r/1478604980-18062-1-git-send-email-p.shailesh@samsung.com Signed-off-by: Shailesh Pandey Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index dbac8860c721..81203e8ba013 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -89,6 +89,7 @@ void shm_init_ns(struct ipc_namespace *ns) static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; + shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch) { @@ -387,6 +388,7 @@ static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) struct file *file = vma->vm_file; struct shm_file_data *sfd =
shm_file_data(file); int err = 0; + if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; @@ -417,7 +419,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma) * In case of remap_file_pages() emulation, the file can represent * removed IPC ID: propogate shm_lock() error to caller. */ - ret =__shm_open(vma); + ret = __shm_open(vma); if (ret) return ret; @@ -468,6 +470,7 @@ static unsigned long shm_get_unmapped_area(struct file *file, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); + return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } @@ -766,6 +769,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp, } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); + spin_lock_irq(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; @@ -1028,6 +1032,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); + if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) { err = -EPERM; @@ -1045,6 +1050,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); + err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; @@ -1354,9 +1360,10 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) vma = next; } -#else /* CONFIG_MMU */ +#else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be - * given */ + * given + */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; -- cgit v1.2.3 From b6f8a92c9ca835b4a079ecee8433d0d110398448 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 14 Dec 2016 15:06:13 -0800 Subject: posix-timers: give lazy compilers some help optimizing code away The OpenRISC compiler (so far) fails to optimize away a large portion of code containing a reference to posix_timer_event in alarmtimer.c when CONFIG_POSIX_TIMERS is unset. Let's give it a direct clue to let the build succeed. This fixes [linux-next:master 6682/7183] alarmtimer.c:undefined reference to `posix_timer_event' reported by kbuild test robot. Signed-off-by: Nicolas Pitre Cc: Thomas Gleixner Cc: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/alarmtimer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 9b08ca391aed..3921cf7fea8e 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -516,7 +516,8 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, spin_lock_irqsave(&ptr->it_lock, flags); if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) { - if (posix_timer_event(ptr, 0) != 0) + if (IS_ENABLED(CONFIG_POSIX_TIMERS) && + posix_timer_event(ptr, 0) != 0) ptr->it_overrun++; } -- cgit v1.2.3 From b5fc8c6c000aa083a671cd8f1736d04081bf65aa Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 14 Dec 2016 15:06:15 -0800 Subject: drivers/net/wireless/intel/iwlwifi/dvm/calib.c: simplify min() expression This cast is no longer needed.
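For context: the kernel's min() macro type-checks its arguments, so the (s32) cast was only needed while the operand types differed; with both operands plain int, the call compiles cleanly without it. A toy illustration (the constant's value here is made up for the example):

	#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 15	/* hypothetical value */

	int delta_g = -20;
	int code = min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
	/* code == 15; both arguments are int, so no cast is required */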
Cc: Johannes Berg Cc: Emmanuel Grumbach Cc: Intel Linux Wireless Cc: Kalle Valo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/wireless/intel/iwlwifi/dvm/calib.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c index e9cef9de9ed8..c96f9b1d948a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, /* bound gain by 2 bits value max, 3rd bit is sign */ data->delta_gain_code[i] = - min(abs(delta_g), - (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* -- cgit v1.2.3 From 22722799de869912fb541c2c85b51cec4226c614 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Wed, 14 Dec 2016 15:06:18 -0800 Subject: ktest.pl: fix english Adjust spelling to more common "mandatory". Variant "mandidory" is certainly wrong. Link: http://lkml.kernel.org/r/20161011073003.GA19476@amd Signed-off-by: Pavel Machek Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/ktest/ktest.pl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index d08e214ec6e7..be93ab02b490 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -719,14 +719,14 @@ sub set_value { if ($buildonly && $lvalue =~ /^TEST_TYPE(\[.*\])?$/ && $prvalue ne "build") { # Note if a test is something other than build, then we - # will need other manditory options. + # will need other mandatory options. if ($prvalue ne "install") { # for bisect, we need to check BISECT_TYPE if ($prvalue ne "bisect") { $buildonly = 0; } } else { - # install still limits some manditory options. + # install still limits some mandatory options. $buildonly = 2; } } @@ -735,7 +735,7 @@ sub set_value { if ($prvalue ne "install") { $buildonly = 0; } else { - # install still limits some manditory options. + # install still limits some mandatory options. $buildonly = 2; } } @@ -3989,7 +3989,7 @@ sub make_min_config { } } - # Save off all the current mandidory configs + # Save off all the current mandatory configs open (OUT, ">$temp_config") or die "Can't write to $temp_config"; foreach my $config (keys %keep_configs) { -- cgit v1.2.3 From 249e52e35580fcfe5dad53a7dcd7c1252788749c Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 14 Dec 2016 15:06:21 -0800 Subject: kernel/watchdog.c: move shared definitions to nmi.h Patch series "Clean up watchdog handlers", v2. This is an attempt to clean up watchdog handlers. Right now, kernel/watchdog.c implements both softlockup and hardlockup detectors. Softlockup code is generic. Hardlockup code is arch specific. Some architectures don't use hardlockup detectors. They use their own watchdog detectors. To make both these combinations work, we have numerous #ifdefs in kernel/watchdog.c. We are trying here to make these handlers independent of each other. Also provide an interface for architectures to implement their own handlers. watchdog_nmi_enable and watchdog_nmi_disable will be defined as weak such that architectures can override their definitions. Thanks to Don Zickus for his suggestions.
Here are our previous discussions http://www.spinics.net/lists/sparclinux/msg16543.html http://www.spinics.net/lists/sparclinux/msg16441.html This patch (of 3): Move shared macros and definitions to nmi.h so that watchdog.c, new file watchdog_hld.c or any other architecture specific handler can use those definitions. Link: http://lkml.kernel.org/r/1478034826-43888-2-git-send-email-babu.moger@oracle.com Signed-off-by: Babu Moger Acked-by: Don Zickus Cc: Ingo Molnar Cc: Jiri Kosina Cc: Andi Kleen Cc: Yaowei Bai Cc: Aaron Tomlin Cc: Ulrich Obergfell Cc: Tejun Heo Cc: Hidehiro Kawai Cc: Josh Hunt Cc: "David S. Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/nmi.h | 24 ++++++++++++++++++++++++ kernel/watchdog.c | 28 ++++------------------------ 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a78c35cff1ae..aacca824a6ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,23 @@ #include #include +/* + * The run state of the lockup detectors is controlled by the content of the + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. + * + * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' + * are variables that are only used as an 'interface' between the parameters + * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The + * 'watchdog_thresh' variable is handled differently because its value is not + * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' + * is equal zero. + */ +#define NMI_WATCHDOG_ENABLED_BIT 0 +#define SOFT_WATCHDOG_ENABLED_BIT 1 +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * @@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled; extern int soft_watchdog_enabled; extern int watchdog_user_enabled; extern int watchdog_thresh; +extern unsigned long watchdog_enabled; extern unsigned long *watchdog_cpumask_bits; +#ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; +#else +#define sysctl_softlockup_all_cpu_backtrace 0 +#define sysctl_hardlockup_all_cpu_backtrace 0 +#endif +extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6d1020c03d41..d21049496e26 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -27,29 +27,12 @@ #include #include -/* - * The run state of the lockup detectors is controlled by the content of the - * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - - * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. - * - * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' - * are variables that are only used as an 'interface' between the parameters - * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The - * 'watchdog_thresh' variable is handled differently because its value is not - * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' - * is equal zero. 
- */ -#define NMI_WATCHDOG_ENABLED_BIT 0 -#define SOFT_WATCHDOG_ENABLED_BIT 1 -#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) -#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) - static DEFINE_MUTEX(watchdog_proc_mutex); -#ifdef CONFIG_HARDLOCKUP_DETECTOR -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; #else -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; #endif int __read_mostly nmi_watchdog_enabled; int __read_mostly soft_watchdog_enabled; @@ -59,9 +42,6 @@ int __read_mostly watchdog_thresh = 10; #ifdef CONFIG_SMP int __read_mostly sysctl_softlockup_all_cpu_backtrace; int __read_mostly sysctl_hardlockup_all_cpu_backtrace; -#else -#define sysctl_softlockup_all_cpu_backtrace 0 -#define sysctl_hardlockup_all_cpu_backtrace 0 #endif static struct cpumask watchdog_cpumask __read_mostly; unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); @@ -289,7 +269,7 @@ void touch_softlockup_watchdog_sync(void) #ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ -static bool is_hardlockup(void) +bool is_hardlockup(void) { unsigned long hrint = __this_cpu_read(hrtimer_interrupts); -- cgit v1.2.3 From 73ce0511c43686095efd2f65ef564aab952e07bc Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 14 Dec 2016 15:06:24 -0800 Subject: kernel/watchdog.c: move hardlockup detector to separate file Separate hardlockup code from watchdog.c and move it to watchdog_hld.c. It is mostly straightforward. Remove everything inside CONFIG_HARDLOCKUP_DETECTOR. This code will go to the new file watchdog_hld.c. Also update the makefile accordingly. Link: http://lkml.kernel.org/r/1478034826-43888-3-git-send-email-babu.moger@oracle.com Signed-off-by: Babu Moger Acked-by: Don Zickus Cc: Ingo Molnar Cc: Jiri Kosina Cc: Andi Kleen Cc: Yaowei Bai Cc: Aaron Tomlin Cc: Ulrich Obergfell Cc: Tejun Heo Cc: Hidehiro Kawai Cc: Josh Hunt Cc: "David S.
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 1 + kernel/watchdog.c | 241 +++----------------------------------------------- kernel/watchdog_hld.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+), 230 deletions(-) create mode 100644 kernel/watchdog_hld.c diff --git a/kernel/Makefile b/kernel/Makefile index eb26e12c6c2a..314e7d62f5f0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -84,6 +84,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += debug/ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o +obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d21049496e26..d4b0fa01cae3 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -24,7 +24,6 @@ #include #include -#include #include static DEFINE_MUTEX(watchdog_proc_mutex); @@ -80,50 +79,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); -#ifdef CONFIG_HARDLOCKUP_DETECTOR -static DEFINE_PER_CPU(bool, hard_watchdog_warn); -static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); -static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); -#endif static unsigned long soft_lockup_nmi_warn; -/* boot commands */ -/* - * Should we panic when a soft-lockup or hard-lockup occurs: - */ -#ifdef CONFIG_HARDLOCKUP_DETECTOR -unsigned int __read_mostly hardlockup_panic = - CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; -static unsigned long hardlockup_allcpu_dumped; -/* - * We may not want to enable hard lockup detection by default in all cases, - * for example when running the kernel as a guest on a hypervisor. In these - * cases this function can be called to disable hard lockup detection. This - * function should only be executed once by the boot processor before the - * kernel command line parameters are parsed, because otherwise it is not - * possible to override this in hardlockup_panic_setup(). - */ -void hardlockup_detector_disable(void) -{ - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; -} - -static int __init hardlockup_panic_setup(char *str) -{ - if (!strncmp(str, "panic", 5)) - hardlockup_panic = 1; - else if (!strncmp(str, "nopanic", 7)) - hardlockup_panic = 0; - else if (!strncmp(str, "0", 1)) - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; - else if (!strncmp(str, "1", 1)) - watchdog_enabled |= NMI_WATCHDOG_ENABLED; - return 1; -} -__setup("nmi_watchdog=", hardlockup_panic_setup); -#endif - unsigned int __read_mostly softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; @@ -244,30 +202,12 @@ void touch_all_softlockup_watchdogs(void) wq_watchdog_touch(-1); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR -void touch_nmi_watchdog(void) -{ - /* - * Using __raw here because some code paths have - * preemption enabled. If preemption is enabled - * then interrupts should be enabled too, in which - * case we shouldn't have to worry about the watchdog - * going off. 
- */ - raw_cpu_write(watchdog_nmi_touch, true); - touch_softlockup_watchdog(); -} -EXPORT_SYMBOL(touch_nmi_watchdog); - -#endif - void touch_softlockup_watchdog_sync(void) { __this_cpu_write(softlockup_touch_sync, true); __this_cpu_write(watchdog_touch_ts, 0); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ bool is_hardlockup(void) { @@ -279,7 +219,6 @@ bool is_hardlockup(void) __this_cpu_write(hrtimer_interrupts_saved, hrint); return false; } -#endif static int is_softlockup(unsigned long touch_ts) { @@ -293,77 +232,22 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -#ifdef CONFIG_HARDLOCKUP_DETECTOR - -static struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, -}; - -/* Callback function for perf event subsystem */ -static void watchdog_overflow_callback(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - /* Ensure the watchdog never gets throttled */ - event->hw.interrupts = 0; - - if (__this_cpu_read(watchdog_nmi_touch) == true) { - __this_cpu_write(watchdog_nmi_touch, false); - return; - } - - /* check for a hardlockup - * This is done by making sure our timer interrupt - * is incrementing. The timer interrupt should have - * fired multiple times before we overflow'd. If it hasn't - * then this is a good indication the cpu is stuck - */ - if (is_hardlockup()) { - int this_cpu = smp_processor_id(); - - /* only print hardlockups once */ - if (__this_cpu_read(hard_watchdog_warn) == true) - return; - - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - print_modules(); - print_irqtrace_events(current); - if (regs) - show_regs(regs); - else - dump_stack(); - - /* - * Perform all-CPU dump only once to avoid multiple hardlockups - * generating interleaving traces - */ - if (sysctl_hardlockup_all_cpu_backtrace && - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) - trigger_allbutself_cpu_backtrace(); - - if (hardlockup_panic) - nmi_panic(regs, "Hard LOCKUP"); - - __this_cpu_write(hard_watchdog_warn, true); - return; - } - - __this_cpu_write(hard_watchdog_warn, false); - return; -} -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ - static void watchdog_interrupt_count(void) { __this_cpu_inc(hrtimer_interrupts); } -static int watchdog_nmi_enable(unsigned int cpu); -static void watchdog_nmi_disable(unsigned int cpu); +/* + * These two functions are mostly architecture specific + * defining them as weak here. + */ +int __weak watchdog_nmi_enable(unsigned int cpu) +{ + return 0; +} +void __weak watchdog_nmi_disable(unsigned int cpu) +{ +} static int watchdog_enable_all_cpus(void); static void watchdog_disable_all_cpus(void); @@ -556,109 +440,6 @@ static void watchdog(unsigned int cpu) watchdog_nmi_disable(cpu); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR -/* - * People like the simple clean cpu node info on boot. - * Reduce the watchdog noise by only printing messages - * that are different from what cpu0 displayed. - */ -static unsigned long cpu0_err; - -static int watchdog_nmi_enable(unsigned int cpu) -{ - struct perf_event_attr *wd_attr; - struct perf_event *event = per_cpu(watchdog_ev, cpu); - - /* nothing to do if the hard lockup detector is disabled */ - if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) - goto out; - - /* is it already setup and enabled? 
*/ - if (event && event->state > PERF_EVENT_STATE_OFF) - goto out; - - /* it is setup but not enabled */ - if (event != NULL) - goto out_enable; - - wd_attr = &wd_hw_attr; - wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); - - /* Try to register using hardware perf events */ - event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); - - /* save cpu0 error for future comparision */ - if (cpu == 0 && IS_ERR(event)) - cpu0_err = PTR_ERR(event); - - if (!IS_ERR(event)) { - /* only print for cpu0 or different than cpu0 */ - if (cpu == 0 || cpu0_err) - pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); - goto out_save; - } - - /* - * Disable the hard lockup detector if _any_ CPU fails to set up - * set up the hardware perf event. The watchdog() function checks - * the NMI_WATCHDOG_ENABLED bit periodically. - * - * The barriers are for syncing up watchdog_enabled across all the - * cpus, as clear_bit() does not use barriers. - */ - smp_mb__before_atomic(); - clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); - smp_mb__after_atomic(); - - /* skip displaying the same error again */ - if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) - return PTR_ERR(event); - - /* vary the KERN level based on the returned errno */ - if (PTR_ERR(event) == -EOPNOTSUPP) - pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); - else if (PTR_ERR(event) == -ENOENT) - pr_warn("disabled (cpu%i): hardware events not enabled\n", - cpu); - else - pr_err("disabled (cpu%i): unable to create perf event: %ld\n", - cpu, PTR_ERR(event)); - - pr_info("Shutting down hard lockup detector on all cpus\n"); - - return PTR_ERR(event); - - /* success path */ -out_save: - per_cpu(watchdog_ev, cpu) = event; -out_enable: - perf_event_enable(per_cpu(watchdog_ev, cpu)); -out: - return 0; -} - -static void watchdog_nmi_disable(unsigned int cpu) -{ - struct perf_event *event = per_cpu(watchdog_ev, cpu); - - if (event) { - perf_event_disable(event); - per_cpu(watchdog_ev, cpu) = NULL; - - /* should be in cleanup, but blocks oprofile */ - perf_event_release_kernel(event); - } - if (cpu == 0) { - /* watchdog_nmi_enable() expects this to be zero initially. */ - cpu0_err = 0; - } -} - -#else -static int watchdog_nmi_enable(unsigned int cpu) { return 0; } -static void watchdog_nmi_disable(unsigned int cpu) { return; } -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ - static struct smp_hotplug_thread watchdog_threads = { .store = &softlockup_watchdog, .thread_should_run = watchdog_should_run, diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c new file mode 100644 index 000000000000..84016c8aee6b --- /dev/null +++ b/kernel/watchdog_hld.c @@ -0,0 +1,227 @@ +/* + * Detect hard lockups on a system + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * Note: Most of this code is borrowed heavily from the original softlockup + * detector, so thanks to Ingo for the initial implementation. + * Some chunks also taken from the old x86-specific nmi watchdog code, thanks + * to those contributors as well. 
+ */ + +#define pr_fmt(fmt) "NMI watchdog: " fmt + +#include +#include +#include +#include + +static DEFINE_PER_CPU(bool, hard_watchdog_warn); +static DEFINE_PER_CPU(bool, watchdog_nmi_touch); +static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); + +/* boot commands */ +/* + * Should we panic when a soft-lockup or hard-lockup occurs: + */ +unsigned int __read_mostly hardlockup_panic = + CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; +static unsigned long hardlockup_allcpu_dumped; +/* + * We may not want to enable hard lockup detection by default in all cases, + * for example when running the kernel as a guest on a hypervisor. In these + * cases this function can be called to disable hard lockup detection. This + * function should only be executed once by the boot processor before the + * kernel command line parameters are parsed, because otherwise it is not + * possible to override this in hardlockup_panic_setup(). + */ +void hardlockup_detector_disable(void) +{ + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; +} + +static int __init hardlockup_panic_setup(char *str) +{ + if (!strncmp(str, "panic", 5)) + hardlockup_panic = 1; + else if (!strncmp(str, "nopanic", 7)) + hardlockup_panic = 0; + else if (!strncmp(str, "0", 1)) + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; + else if (!strncmp(str, "1", 1)) + watchdog_enabled |= NMI_WATCHDOG_ENABLED; + return 1; +} +__setup("nmi_watchdog=", hardlockup_panic_setup); + +void touch_nmi_watchdog(void) +{ + /* + * Using __raw here because some code paths have + * preemption enabled. If preemption is enabled + * then interrupts should be enabled too, in which + * case we shouldn't have to worry about the watchdog + * going off. + */ + raw_cpu_write(watchdog_nmi_touch, true); + touch_softlockup_watchdog(); +} +EXPORT_SYMBOL(touch_nmi_watchdog); + +static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + +/* Callback function for perf event subsystem */ +static void watchdog_overflow_callback(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + /* Ensure the watchdog never gets throttled */ + event->hw.interrupts = 0; + + if (__this_cpu_read(watchdog_nmi_touch) == true) { + __this_cpu_write(watchdog_nmi_touch, false); + return; + } + + /* check for a hardlockup + * This is done by making sure our timer interrupt + * is incrementing. The timer interrupt should have + * fired multiple times before we overflow'd. If it hasn't + * then this is a good indication the cpu is stuck + */ + if (is_hardlockup()) { + int this_cpu = smp_processor_id(); + + /* only print hardlockups once */ + if (__this_cpu_read(hard_watchdog_warn) == true) + return; + + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + /* + * Perform all-CPU dump only once to avoid multiple hardlockups + * generating interleaving traces + */ + if (sysctl_hardlockup_all_cpu_backtrace && + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + + __this_cpu_write(hard_watchdog_warn, true); + return; + } + + __this_cpu_write(hard_watchdog_warn, false); + return; +} + +/* + * People like the simple clean cpu node info on boot. + * Reduce the watchdog noise by only printing messages + * that are different from what cpu0 displayed. 
+ */ +static unsigned long cpu0_err; + +int watchdog_nmi_enable(unsigned int cpu) +{ + struct perf_event_attr *wd_attr; + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + /* nothing to do if the hard lockup detector is disabled */ + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + goto out; + + /* is it already setup and enabled? */ + if (event && event->state > PERF_EVENT_STATE_OFF) + goto out; + + /* it is setup but not enabled */ + if (event != NULL) + goto out_enable; + + wd_attr = &wd_hw_attr; + wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); + + /* Try to register using hardware perf events */ + event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); + + /* save cpu0 error for future comparision */ + if (cpu == 0 && IS_ERR(event)) + cpu0_err = PTR_ERR(event); + + if (!IS_ERR(event)) { + /* only print for cpu0 or different than cpu0 */ + if (cpu == 0 || cpu0_err) + pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); + goto out_save; + } + + /* + * Disable the hard lockup detector if _any_ CPU fails to set up + * set up the hardware perf event. The watchdog() function checks + * the NMI_WATCHDOG_ENABLED bit periodically. + * + * The barriers are for syncing up watchdog_enabled across all the + * cpus, as clear_bit() does not use barriers. + */ + smp_mb__before_atomic(); + clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); + smp_mb__after_atomic(); + + /* skip displaying the same error again */ + if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) + return PTR_ERR(event); + + /* vary the KERN level based on the returned errno */ + if (PTR_ERR(event) == -EOPNOTSUPP) + pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); + else if (PTR_ERR(event) == -ENOENT) + pr_warn("disabled (cpu%i): hardware events not enabled\n", + cpu); + else + pr_err("disabled (cpu%i): unable to create perf event: %ld\n", + cpu, PTR_ERR(event)); + + pr_info("Shutting down hard lockup detector on all cpus\n"); + + return PTR_ERR(event); + + /* success path */ +out_save: + per_cpu(watchdog_ev, cpu) = event; +out_enable: + perf_event_enable(per_cpu(watchdog_ev, cpu)); +out: + return 0; +} + +void watchdog_nmi_disable(unsigned int cpu) +{ + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + if (event) { + perf_event_disable(event); + per_cpu(watchdog_ev, cpu) = NULL; + + /* should be in cleanup, but blocks oprofile */ + perf_event_release_kernel(event); + } + if (cpu == 0) { + /* watchdog_nmi_enable() expects this to be zero initially. */ + cpu0_err = 0; + } +} -- cgit v1.2.3 From 7a5c8b57cec93196b3e84e3cad2ff81ae0faed78 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 14 Dec 2016 15:06:28 -0800 Subject: sparc: implement watchdog_nmi_enable and watchdog_nmi_disable Implement functions watchdog_nmi_enable and watchdog_nmi_disable to enable/disable nmi watchdog. Sparc uses arch specific nmi watchdog handler. Currently, we do not have a way to enable/disable nmi watchdog dynamically. With these patches we can enable or disable arch specific nmi watchdogs using proc or sysctl interface. Example commands. 
To enable: echo 1 > /proc/sys/kernel/nmi_watchdog To disable: echo 0 > /proc/sys/kernel/nmi_watchdog It can also be achieved using the sysctl parameter kernel.nmi_watchdog Link: http://lkml.kernel.org/r/1478034826-43888-4-git-send-email-babu.moger@oracle.com Signed-off-by: Babu Moger Acked-by: Don Zickus Cc: Ingo Molnar Cc: Jiri Kosina Cc: Andi Kleen Cc: Yaowei Bai Cc: Aaron Tomlin Cc: Ulrich Obergfell Cc: Tejun Heo Cc: Hidehiro Kawai Cc: Josh Hunt Cc: "David S. Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sparc/kernel/nmi.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index a9973bb4a1b2..95e73c63c99d 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -42,7 +42,7 @@ static int panic_on_timeout; */ atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ EXPORT_SYMBOL(nmi_active); - +static int nmi_init_done; static unsigned int nmi_hz = HZ; static DEFINE_PER_CPU(short, wd_enabled); static int endflag __initdata; @@ -153,6 +153,8 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) void stop_nmi_watchdog(void *unused) { + if (!__this_cpu_read(wd_enabled)) + return; pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); __this_cpu_write(wd_enabled, 0); atomic_dec(&nmi_active); @@ -207,6 +209,9 @@ error: void start_nmi_watchdog(void *unused) { + if (__this_cpu_read(wd_enabled)) + return; + __this_cpu_write(wd_enabled, 1); atomic_inc(&nmi_active); @@ -259,6 +264,8 @@ int __init nmi_init(void) } } + nmi_init_done = 1; + return err; } @@ -270,3 +277,38 @@ static int __init setup_nmi_watchdog(char *str) return 0; } __setup("nmi_watchdog=", setup_nmi_watchdog); + +/* + * sparc specific NMI watchdog enable function. + * Enables watchdog if it is not enabled already. + */ +int watchdog_nmi_enable(unsigned int cpu) +{ + if (atomic_read(&nmi_active) == -1) { + pr_warn("NMI watchdog cannot be enabled or disabled\n"); + return -1; + } + + /* + * watchdog thread could start even before nmi_init is called. + * Just Return in that case. Let nmi_init finish the init + * process first. + */ + if (!nmi_init_done) + return 0; + + smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1); + + return 0; +} +/* + * sparc specific NMI watchdog disable function. + * Disables watchdog if it is not disabled already. + */ +void watchdog_nmi_disable(unsigned int cpu) +{ + if (atomic_read(&nmi_active) == -1) + pr_warn_once("NMI watchdog cannot be enabled or disabled\n"); + else + smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1); +} -- cgit v1.2.3 From 248e7357cf8ec50bdaca68dce7c488ce843b6b93 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:31 -0800 Subject: ipc/sem: do not call wake_sem_queue_do() prematurely ... as this call should obviously be paired with its _prepare() counterpart. At least whenever possible, as there is no harm in calling it bogusly as we do now in a few places. Immediate error semop(2) paths that are far from ever having the task block can be simplified and avoid a few unnecessary loads on their way out of the call as it is not deeply nested.
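Condensed, the simplification on these early-error paths has the following shape (a sketch of the change shown in the diff below):

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		/* nothing is queued yet, so there is nobody to wake:
		 * drop the RCU read lock and leave directly */
		rcu_read_unlock();
		goto out_free;
	}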
Link: http://lkml.kernel.org/r/1474225896-10066-2-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 10b94bc59d4a..b33dd98fde69 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1887,16 +1887,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, } error = -EFBIG; - if (max >= sma->sem_nsems) - goto out_rcu_wakeup; + if (max >= sma->sem_nsems) { + rcu_read_unlock(); + goto out_free; + } error = -EACCES; - if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) - goto out_rcu_wakeup; + if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) { + rcu_read_unlock(); + goto out_free; + } error = security_sem_semop(sma, sops, nsops, alter); - if (error) - goto out_rcu_wakeup; + if (error) { + rcu_read_unlock(); + goto out_free; + } error = -EIDRM; locknum = sem_lock(sma, sops, nsops); @@ -2039,7 +2045,6 @@ sleep_again: out_unlock_free: sem_unlock(sma, locknum); -out_rcu_wakeup: rcu_read_unlock(); wake_up_sem_queue_do(&tasks); out_free: -- cgit v1.2.3 From 9ae949fa382b080170f9d3c8bd9dea951cf52ee7 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:34 -0800 Subject: ipc/sem: rework task wakeups Our sysv sems have been using the notion of lockless wakeups for a while, ever since commit 0a2b9d4c7967 ("ipc/sem.c: move wake_up_process out of the spinlock section"), in order to reduce the sem_lock hold times. This in-house pending queue can be replaced by wake_q (just like all the rest of ipc now), in that it provides the following advantages: o Simplifies and gets rid of unnecessary code. o We get rid of the IN_WAKEUP complexities. Given that wake_q_add() grabs a reference to the task, if awoken due to an unrelated event, between the wake_q_add() and wake_up_q() window, we cannot race with sys_exit and the imminent call to wake_up_process(). o By not spinning IN_WAKEUP, we no longer need to disable preemption. In consequence, the wakeup paths (after schedule(), that is) must acknowledge an external signal/event, as well as spurious wakeups occurring during the pending wakeup window. Obviously no changes in semantics that could be visible to the user. The fastpath is _only_ for when we know for sure that we were awoken due to the waker's successful semop call (queue.status is not -EINTR).
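For readers unfamiliar with wake_q, the pattern the patch converts to looks roughly like this (a condensed sketch of the calls in the diff below, not a new API):

	DEFINE_WAKE_Q(wake_q);

	sem_lock(sma, sops, nsops);
	/* ... decide which waiters can proceed ... */
	wake_q_add(&wake_q, q->sleeper);	/* grabs a task reference */
	WRITE_ONCE(q->status, error);		/* publish the result after the reference is held */
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	wake_up_q(&wake_q);			/* do the wakeups with no locks held */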
On a 48-core Haswell, running the ipcscale 'waitforzero' test, the following is seen with increasing thread counts:

                              v4.8-rc5               v4.8-rc5
                                                      semopv2
Hmean    sembench-sem-2     574733.00 ( 0.00%)    578322.00 ( 0.62%)
Hmean    sembench-sem-8     811708.00 ( 0.00%)    824689.00 ( 1.59%)
Hmean    sembench-sem-12    842448.00 ( 0.00%)    845409.00 ( 0.35%)
Hmean    sembench-sem-21    933003.00 ( 0.00%)    977748.00 ( 4.80%)
Hmean    sembench-sem-48    935910.00 ( 0.00%)   1004759.00 ( 7.36%)
Hmean    sembench-sem-79    937186.00 ( 0.00%)    983976.00 ( 4.99%)
Hmean    sembench-sem-234   974256.00 ( 0.00%)   1060294.00 ( 8.83%)
Hmean    sembench-sem-265   975468.00 ( 0.00%)   1016243.00 ( 4.18%)
Hmean    sembench-sem-296   991280.00 ( 0.00%)   1042659.00 ( 5.18%)
Hmean    sembench-sem-327   975415.00 ( 0.00%)   1029977.00 ( 5.59%)
Hmean    sembench-sem-358  1014286.00 ( 0.00%)   1049624.00 ( 3.48%)
Hmean    sembench-sem-389   972939.00 ( 0.00%)   1043127.00 ( 7.21%)
Hmean    sembench-sem-420   981909.00 ( 0.00%)   1056747.00 ( 7.62%)
Hmean    sembench-sem-451   990139.00 ( 0.00%)   1051609.00 ( 6.21%)
Hmean    sembench-sem-482   965735.00 ( 0.00%)   1040313.00 ( 7.72%)

[akpm@linux-foundation.org: coding-style fixes] [sfr@canb.auug.org.au: merge fix for WAKE_Q to DEFINE_WAKE_Q rename] Link: http://lkml.kernel.org/r/20161122210410.5eca9fc2@canb.auug.org.au Link: http://lkml.kernel.org/r/1474225896-10066-3-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Acked-by: Manfred Spraul Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 266 ++++++++++++++++++++------------------------------------ 1 file changed, 86 insertions(+), 180 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index b33dd98fde69..1ff103299c28 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -11,6 +11,7 @@ * (c) 2001 Red Hat Inc * Lockless wakeup * (c) 2003 Manfred Spraul + * (c) 2016 Davidlohr Bueso * Further wakeup optimizations, documentation * (c) 2010 Manfred Spraul * @@ -53,15 +54,11 @@ * Semaphores are actively given to waiting tasks (necessary for FIFO). * (see update_queue()) * - To improve the scalability, the actual wake-up calls are performed after - * dropping all locks. (see wake_up_sem_queue_prepare(), - * wake_up_sem_queue_do()) + * dropping all locks. (see wake_up_sem_queue_prepare()) * - All work is done by the waker, the woken up task does not have to do * anything - not even acquiring a lock or dropping a refcount. * - A woken up task may not even touch the semaphore array anymore, it may * have been destroyed already by a semctl(RMID). - * - The synchronizations between wake-ups due to a timeout/signal and a - * wake-up due to a completed semaphore operation is achieved by using an - * intermediate state (IN_WAKEUP). * - UNDO values are stored in an array (one per process and per * semaphore array, lazily allocated). For backwards compatibility, multiple * modes for the UNDO variables are supported (per process, per thread) @@ -471,40 +468,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ipc_rmid(&sem_ids(ns), &s->sem_perm); } -/* - * Lockless wakeup algorithm: - * Without the check/retry algorithm a lockless wakeup is possible: - * - queue.status is initialized to -EINTR before blocking. - * - wakeup is performed by - * * unlinking the queue entry from the pending list - * * setting queue.status to IN_WAKEUP - * This is the notification for the blocked thread that a - * result value is imminent. - * * call wake_up_process - * * set queue.status to the final value.
- * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the sem array. - * * otherwise it must acquire the spinlock and check what's up. - * - * The two-stage algorithm is necessary to protect against the following - * races: - * - if queue.status is set after wake_up_process, then the woken up idle - * thread could race forward and try (and fail) to acquire sma->lock - * before update_queue had a chance to set queue.status - * - if queue.status is written before wake_up_process and if the - * blocked process is woken up by a signal between writing - * queue.status and the wake_up_process, then the woken up - * process could return from semtimedop and die by calling - * sys_exit before wake_up_process is called. Then wake_up_process - * will oops, because the task structure is already invalid. - * (yes, this happened on s390 with sysv msg). - * - */ -#define IN_WAKEUP 1 - /** * newary - Create a new semaphore set * @ns: namespace @@ -703,51 +666,18 @@ undo: return result; } -/** wake_up_sem_queue_prepare(q, error): Prepare wake-up - * @q: queue entry that must be signaled - * @error: Error value for the signal - * - * Prepare the wake-up of the queue entry q. - */ -static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) -{ - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the - * wakee busy-wait until we're scheduled back on. - */ - preempt_disable(); - } - q->status = IN_WAKEUP; - q->pid = error; - - list_add_tail(&q->list, pt); -} - -/** - * wake_up_sem_queue_do - do the actual wake-up - * @pt: list of tasks to be woken up - * - * Do the actual wake-up. - * The function is called without any locks held, thus the semaphore array - * could be destroyed already and the tasks can disappear as soon as the - * status is set to the actual return code. - */ -static void wake_up_sem_queue_do(struct list_head *pt) +static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, + struct wake_q_head *wake_q) { - struct sem_queue *q, *t; - int did_something; - - did_something = !list_empty(pt); - list_for_each_entry_safe(q, t, pt, list) { - wake_up_process(q->sleeper); - /* q can disappear immediately after writing q->status. */ - smp_wmb(); - q->status = q->pid; - } - if (did_something) - preempt_enable(); + wake_q_add(wake_q, q->sleeper); + /* + * Rely on the above implicit barrier, such that we can + * ensure that we hold reference to the task before setting + * q->status. Otherwise we could race with do_exit if the + * task is awoken by an external event before calling + * wake_up_process(). + */ + WRITE_ONCE(q->status, error); } static void unlink_queue(struct sem_array *sma, struct sem_queue *q) @@ -795,18 +725,18 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q) * wake_const_ops - wake up non-alter tasks * @sma: semaphore array. * @semnum: semaphore that was modified. - * @pt: list head for the tasks that must be woken up. + * @wake_q: lockless wake-queue head. * * wake_const_ops must be called after a semaphore in a semaphore array * was set to 0. If complex const operations are pending, wake_const_ops must * be called with semnum = -1, as well as with the number of each modified * semaphore. - * The tasks that must be woken up are added to @pt. 
The return code + * The tasks that must be woken up are added to @wake_q. The return code * is stored in q->pid. * The function returns 1 if at least one operation was completed successfully. */ static int wake_const_ops(struct sem_array *sma, int semnum, - struct list_head *pt) + struct wake_q_head *wake_q) { struct sem_queue *q; struct list_head *walk; @@ -832,7 +762,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum, unlink_queue(sma, q); - wake_up_sem_queue_prepare(pt, q, error); + wake_up_sem_queue_prepare(q, error, wake_q); if (error == 0) semop_completed = 1; } @@ -845,14 +775,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum, * @sma: semaphore array * @sops: operations that were performed * @nsops: number of operations - * @pt: list head of the tasks that must be woken up. + * @wake_q: lockless wake-queue head * * Checks all required queue for wait-for-zero operations, based * on the actual changes that were performed on the semaphore array. * The function returns 1 if at least one operation was completed successfully. */ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, - int nsops, struct list_head *pt) + int nsops, struct wake_q_head *wake_q) { int i; int semop_completed = 0; @@ -865,7 +795,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, if (sma->sem_base[num].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, num, pt); + semop_completed |= wake_const_ops(sma, num, wake_q); } } } else { @@ -876,7 +806,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, for (i = 0; i < sma->sem_nsems; i++) { if (sma->sem_base[i].semval == 0) { got_zero = 1; - semop_completed |= wake_const_ops(sma, i, pt); + semop_completed |= wake_const_ops(sma, i, wake_q); } } } @@ -885,7 +815,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * then check the global queue, too. */ if (got_zero) - semop_completed |= wake_const_ops(sma, -1, pt); + semop_completed |= wake_const_ops(sma, -1, wake_q); return semop_completed; } @@ -895,19 +825,19 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * update_queue - look for tasks that can be completed. * @sma: semaphore array. * @semnum: semaphore that was modified. - * @pt: list head for the tasks that must be woken up. + * @wake_q: lockless wake-queue head. * * update_queue must be called after a semaphore in a semaphore array * was modified. If multiple semaphores were modified, update_queue must * be called with semnum = -1, as well as with the number of each modified * semaphore. - * The tasks that must be woken up are added to @pt. The return code + * The tasks that must be woken up are added to @wake_q. The return code * is stored in q->pid. * The function internally checks if const operations can now succeed. * * The function return 1 if at least one semop was completed successfully. 
*/ -static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) +static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q) { struct sem_queue *q; struct list_head *walk; @@ -949,11 +879,11 @@ again: restart = 0; } else { semop_completed = 1; - do_smart_wakeup_zero(sma, q->sops, q->nsops, pt); + do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q); restart = check_restart(sma, q); } - wake_up_sem_queue_prepare(pt, q, error); + wake_up_sem_queue_prepare(q, error, wake_q); if (restart) goto again; } @@ -984,24 +914,24 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops) * @sops: operations that were performed * @nsops: number of operations * @otime: force setting otime - * @pt: list head of the tasks that must be woken up. + * @wake_q: lockless wake-queue head * * do_smart_update() does the required calls to update_queue and wakeup_zero, * based on the actual changes that were performed on the semaphore array. * Note that the function does not do the actual wake-up: the caller is - * responsible for calling wake_up_sem_queue_do(@pt). + * responsible for calling wake_up_q(). * It is safe to perform this call after dropping all locks. */ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, - int otime, struct list_head *pt) + int otime, struct wake_q_head *wake_q) { int i; - otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); + otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q); if (!list_empty(&sma->pending_alter)) { /* semaphore array uses the global queue - just process it. */ - otime |= update_queue(sma, -1, pt); + otime |= update_queue(sma, -1, wake_q); } else { if (!sops) { /* @@ -1009,7 +939,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop * known. Check all. */ for (i = 0; i < sma->sem_nsems; i++) - otime |= update_queue(sma, i, pt); + otime |= update_queue(sma, i, wake_q); } else { /* * Check the semaphores that were increased: @@ -1023,7 +953,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop for (i = 0; i < nsops; i++) { if (sops[i].sem_op > 0) { otime |= update_queue(sma, - sops[i].sem_num, pt); + sops[i].sem_num, wake_q); } } } @@ -1111,8 +1041,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) struct sem_undo *un, *tu; struct sem_queue *q, *tq; struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); - struct list_head tasks; int i; + DEFINE_WAKE_Q(wake_q); /* Free the existing undo structures for this semaphore set. */ ipc_assert_locked_object(&sma->sem_perm); @@ -1126,25 +1056,24 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) } /* Wake up all pending processes and let them fail with EIDRM. 
*/ - INIT_LIST_HEAD(&tasks); list_for_each_entry_safe(q, tq, &sma->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } for (i = 0; i < sma->sem_nsems; i++) { struct sem *sem = sma->sem_base + i; list_for_each_entry_safe(q, tq, &sem->pending_const, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { unlink_queue(sma, q); - wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } } @@ -1153,7 +1082,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); ns->used_sems -= sma->sem_nsems; ipc_rcu_putref(sma, sem_rcu_free); } @@ -1292,9 +1221,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, struct sem_undo *un; struct sem_array *sma; struct sem *curr; - int err; - struct list_head tasks; - int val; + int err, val; + DEFINE_WAKE_Q(wake_q); + #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) /* big-endian 64bit */ val = arg >> 32; @@ -1306,8 +1235,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, if (val > SEMVMX || val < 0) return -ERANGE; - INIT_LIST_HEAD(&tasks); - rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); if (IS_ERR(sma)) { @@ -1350,10 +1277,10 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, curr->sempid = task_tgid_vnr(current); sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - do_smart_update(sma, NULL, 0, 0, &tasks); + do_smart_update(sma, NULL, 0, 0, &wake_q); sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); return 0; } @@ -1365,9 +1292,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int err, nsems; ushort fast_sem_io[SEMMSL_FAST]; ushort *sem_io = fast_sem_io; - struct list_head tasks; - - INIT_LIST_HEAD(&tasks); + DEFINE_WAKE_Q(wake_q); rcu_read_lock(); sma = sem_obtain_object_check(ns, semid); @@ -1478,7 +1403,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - do_smart_update(sma, NULL, 0, 0, &tasks); + do_smart_update(sma, NULL, 0, 0, &wake_q); err = 0; goto out_unlock; } @@ -1514,7 +1439,7 @@ out_unlock: sem_unlock(sma, -1); out_rcu_wakeup: rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); out_free: if (sem_io != fast_sem_io) ipc_free(sem_io); @@ -1787,32 +1712,6 @@ out: return un; } - -/** - * get_queue_result - retrieve the result code from sem_queue - * @q: Pointer to queue structure - * - * Retrieve the return code from the pending queue. If IN_WAKEUP is found in - * q->status, then we must loop until the value is replaced with the final - * value: This may happen if a task is woken up by an unrelated event (e.g. - * signal) and in parallel the task is woken up by another task because it got - * the requested semaphores. - * - * The function can be called with or without holding the semaphore spinlock. 
- */ -static int get_queue_result(struct sem_queue *q) -{ - int error; - - error = q->status; - while (unlikely(error == IN_WAKEUP)) { - cpu_relax(); - error = q->status; - } - - return error; -} - SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, unsigned, nsops, const struct timespec __user *, timeout) { @@ -1825,7 +1724,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, struct sem_queue queue; unsigned long jiffies_left = 0; struct ipc_namespace *ns; - struct list_head tasks; ns = current->nsproxy->ipc_ns; @@ -1865,7 +1763,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, alter = 1; } - INIT_LIST_HEAD(&tasks); if (undos) { /* On success, find_alloc_undo takes the rcu_read_lock */ @@ -1933,22 +1830,31 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, queue.alter = alter; error = perform_atomic_semop(sma, &queue); - if (error == 0) { - /* If the operation was successful, then do + if (error == 0) { /* non-blocking succesfull path */ + DEFINE_WAKE_Q(wake_q); + + /* + * If the operation was successful, then do * the required updates. */ if (alter) - do_smart_update(sma, sops, nsops, 1, &tasks); + do_smart_update(sma, sops, nsops, 1, &wake_q); else set_semotime(sma, sops); + + sem_unlock(sma, locknum); + rcu_read_unlock(); + wake_up_q(&wake_q); + + goto out_free; } - if (error <= 0) + if (error < 0) /* non-blocking error path */ goto out_unlock_free; - /* We need to sleep on this operation, so we put the current + /* + * We need to sleep on this operation, so we put the current * task into the pending queue and go to sleep. */ - if (nsops == 1) { struct sem *curr; curr = &sma->sem_base[sops->sem_num]; @@ -1977,10 +1883,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, sma->complex_count++; } +sleep_again: queue.status = -EINTR; queue.sleeper = current; -sleep_again: __set_current_state(TASK_INTERRUPTIBLE); sem_unlock(sma, locknum); rcu_read_unlock(); @@ -1990,28 +1896,31 @@ sleep_again: else schedule(); - error = get_queue_result(&queue); - + /* + * fastpath: the semop has completed, either successfully or not, from + * the syscall pov, is quite irrelevant to us at this point; we're done. + * + * We _do_ care, nonetheless, about being awoken by a signal or + * spuriously. The queue.status is checked again in the slowpath (aka + * after taking sem_lock), such that we can detect scenarios where we + * were awakened externally, during the window between wake_q_add() and + * wake_up_q(). + */ + error = READ_ONCE(queue.status); if (error != -EINTR) { - /* fast path: update_queue already obtained all requested - * resources. - * Perform a smp_mb(): User space could assume that semop() - * is a memory barrier: Without the mb(), the cpu could - * speculatively read in user space stale data that was - * overwritten by the previous owner of the semaphore. + /* + * User space could assume that semop() is a memory barrier: + * Without the mb(), the cpu could speculatively read in user + * space stale data that was overwritten by the previous owner + * of the semaphore. */ smp_mb(); - goto out_free; } rcu_read_lock(); sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); - - /* - * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing. - */ - error = get_queue_result(&queue); + error = READ_ONCE(queue.status); /* * Array removed? If yes, leave without sem_unlock(). 
@@ -2021,7 +1930,6 @@ sleep_again: goto out_free; } - /* * If queue.status != -EINTR we are woken up by another process. * Leave without unlink_queue(), but with sem_unlock(). */ if (error != -EINTR) goto out_unlock_free; /* - * If an interrupt occurred we have to clean up the queue + * If an interrupt occurred we have to clean up the queue. */ if (timeout && jiffies_left == 0) error = -EAGAIN; /* - * If the wakeup was spurious, just retry + * If the wakeup was spurious, just retry. */ if (error == -EINTR && !signal_pending(current)) goto sleep_again; @@ -2046,7 +1954,6 @@ sleep_again: out_unlock_free: sem_unlock(sma, locknum); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); out_free: if (sops != fast_sops) kfree(sops); @@ -2107,8 +2014,8 @@ void exit_sem(struct task_struct *tsk) for (;;) { struct sem_array *sma; struct sem_undo *un; - struct list_head tasks; int semid, i; + DEFINE_WAKE_Q(wake_q); cond_resched(); @@ -2196,11 +2103,10 @@ void exit_sem(struct task_struct *tsk) } } /* maybe some queued-up processes were waiting for this */ - INIT_LIST_HEAD(&tasks); - do_smart_update(sma, NULL, 0, 1, &tasks); + do_smart_update(sma, NULL, 0, 1, &wake_q); sem_unlock(sma, -1); rcu_read_unlock(); - wake_up_sem_queue_do(&tasks); + wake_up_q(&wake_q); kfree_rcu(un, rcu); } -- cgit v1.2.3 From 4ce33ec2e42d4661bf05289e213bc088eecb9132 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:37 -0800 Subject: ipc/sem: optimize perform_atomic_semop() This is the main workhorse that deals with semop user calls such that the waitforzero or semval update operations, on the set, can complete or not as the sma currently stands. Currently, the set is iterated twice (setting semval, then backwards for the sempid value). Slowpaths, and particularly SEM_UNDO calls, must undo any altered sem when it is detected that the caller must block or has errored-out. With larger sets, there can occur situations where this involves a lot of cycles and can obviously be a suboptimal use of cached resources in shared memory. I.e., discarding the CPU caches of tasks that are also calling semop and have the sembuf cached (and can complete), while the current lock holder doing the semop will block, error out, or do a waitforzero operation. This patch proposes still iterating the set twice, but the first scan is read-only, and we perform the actual updates afterward, once we know that the call will succeed. In order to not suffer from the overhead of dealing with sops that act on the same sem_num, such (rare) cases use perform_atomic_semop_slow(), which is exactly what we have now. Duplicates are detected before grabbing sem_lock, using a simple 32/64-bit hash variable based on the sem_num we are working on. In addition, add some comments about when we expect the caller to block.
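To make the duplicate detection concrete (an illustrative note, not patch text): each sem_num is hashed into a single bit of an unsigned long, so the check is conservative. On a 64-bit kernel, alter operations on, say, sem_num 1 and sem_num 65 set the same bit and dupsop is flagged even though no true duplicate exists; the only cost of such a false positive is falling back to perform_atomic_semop_slow(). In sketch form:

	unsigned long dup = 0;
	bool dupsop = false;

	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << (sop->sem_num % BITS_PER_LONG);

		if (dup & mask)
			dupsop = true;	/* possibly a false positive */
		if (sop->sem_op != 0)
			dup |= mask;	/* only alter operations are recorded */
	}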
[akpm@linux-foundation.org: coding-style fixes] [colin.king@canonical.com: ensure we left shift a ULL rather than a 32 bit integer] Link: http://lkml.kernel.org/r/20161028181129.7311-1-colin.king@canonical.com Link: http://lkml.kernel.org/r/20160921194603.GB21438@linux-80c1.suse Signed-off-by: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Colin Ian King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 102 insertions(+), 10 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 1ff103299c28..c14883f5a4b5 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -115,7 +115,8 @@ struct sem_queue { struct sembuf *sops; /* array of pending operations */ struct sembuf *blocking; /* the operation that blocked */ int nsops; /* number of operations */ - int alter; /* does *sops alter the array? */ + bool alter; /* does *sops alter the array? */ + bool dupsop; /* sops on more than one sem_num */ }; /* Each task has a list of undo requests. They are executed automatically @@ -587,15 +588,23 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) } /** - * perform_atomic_semop - Perform (if possible) a semaphore operation + * perform_atomic_semop[_slow] - Attempt to perform semaphore + * operations on a given array. * @sma: semaphore array * @q: struct sem_queue that describes the operation * + * Caller blocking are as follows, based the value + * indicated by the semaphore operation (sem_op): + * + * (1) >0 never blocks. + * (2) 0 (wait-for-zero operation): semval is non-zero. + * (3) <0 attempting to decrement semval to a value smaller than zero. + * * Returns 0 if the operation was possible. * Returns 1 if the operation is impossible, the caller must sleep. - * Negative values are error codes. + * Returns <0 for error codes. */ -static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) +static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) { int result, sem_op, nsops, pid; struct sembuf *sop; @@ -666,6 +675,72 @@ undo: return result; } +static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) +{ + int result, sem_op, nsops; + struct sembuf *sop; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; + + if (unlikely(q->dupsop)) + return perform_atomic_semop_slow(sma, q); + + /* + * We scan the semaphore set twice, first to ensure that the entire + * operation can succeed, therefore avoiding any pointless writes + * to shared memory and having to undo such changes in order to block + * until the operations can go through. + */ + for (sop = sops; sop < sops + nsops; sop++) { + curr = sma->sem_base + sop->sem_num; + sem_op = sop->sem_op; + result = curr->semval; + + if (!sem_op && result) + goto would_block; /* wait-for-zero */ + + result += sem_op; + if (result < 0) + goto would_block; + + if (result > SEMVMX) + return -ERANGE; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + /* Exceeding the undo range is an error. 
*/ + if (undo < (-SEMAEM - 1) || undo > SEMAEM) + return -ERANGE; + } + } + + for (sop = sops; sop < sops + nsops; sop++) { + curr = sma->sem_base + sop->sem_num; + sem_op = sop->sem_op; + result = curr->semval; + + if (sop->sem_flg & SEM_UNDO) { + int undo = un->semadj[sop->sem_num] - sem_op; + + un->semadj[sop->sem_num] = undo; + } + curr->semval += sem_op; + curr->sempid = q->pid; + } + + return 0; + +would_block: + q->blocking = sop; + return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1; +} + static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, struct wake_q_head *wake_q) { @@ -1720,9 +1795,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, struct sembuf fast_sops[SEMOPM_FAST]; struct sembuf *sops = fast_sops, *sop; struct sem_undo *un; - int undos = 0, alter = 0, max, locknum; + int max, locknum; + bool undos = false, alter = false, dupsop = false; struct sem_queue queue; - unsigned long jiffies_left = 0; + unsigned long dup = 0, jiffies_left = 0; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; @@ -1736,10 +1812,12 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, if (sops == NULL) return -ENOMEM; } + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { error = -EFAULT; goto out_free; } + if (timeout) { struct timespec _timeout; if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) { @@ -1753,17 +1831,30 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, } jiffies_left = timespec_to_jiffies(&_timeout); } + max = 0; for (sop = sops; sop < sops + nsops; sop++) { + unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG); + if (sop->sem_num >= max) max = sop->sem_num; if (sop->sem_flg & SEM_UNDO) - undos = 1; - if (sop->sem_op != 0) - alter = 1; + undos = true; + if (dup & mask) { + /* + * There was a previous alter access that appears + * to have accessed the same semaphore, thus use + * the dupsop logic. "appears", because the detection + * can only check % BITS_PER_LONG. + */ + dupsop = true; + } + if (sop->sem_op != 0) { + alter = true; + dup |= mask; + } } - if (undos) { /* On success, find_alloc_undo takes the rcu_read_lock */ un = find_alloc_undo(ns, semid); @@ -1828,6 +1919,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, queue.undo = un; queue.pid = task_tgid_vnr(current); queue.alter = alter; + queue.dupsop = dupsop; error = perform_atomic_semop(sma, &queue); if (error == 0) { /* non-blocking succesfull path */ -- cgit v1.2.3 From 4663d3e8f21652f33c698fcc2bf20f61499d9c3e Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:40 -0800 Subject: ipc/sem: explicitly inline check_restart The compiler already does this, but make it explicit. This helper is really small and also used in update_queue's main loop, which is O(N^2) scanning. Inline and avoid the function overhead. Link: http://lkml.kernel.org/r/1474225896-10066-5-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipc/sem.c b/ipc/sem.c index c14883f5a4b5..084e1f55ca72 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -772,7 +772,7 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q) * modified the array. * Note that wait-for-zero operations are handled without restart. 
*/ -static int check_restart(struct sem_array *sma, struct sem_queue *q) +static inline int check_restart(struct sem_array *sma, struct sem_queue *q) { /* pending complex alter operations are too difficult to analyse */ if (!list_empty(&sma->pending_alter)) -- cgit v1.2.3 From f150f02cfbc7b6b980e260856555abd73235a6b0 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:43 -0800 Subject: ipc/sem: use proper list api for pending_list wakeups ... saves some LoC and looks cleaner than re-implementing the calls. Link: http://lkml.kernel.org/r/1474225896-10066-6-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Acked-by: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 084e1f55ca72..4f5af6e7d630 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -813,8 +813,7 @@ static inline int check_restart(struct sem_array *sma, struct sem_queue *q) static int wake_const_ops(struct sem_array *sma, int semnum, struct wake_q_head *wake_q) { - struct sem_queue *q; - struct list_head *walk; + struct sem_queue *q, *tmp; struct list_head *pending_list; int semop_completed = 0; @@ -823,25 +822,19 @@ static int wake_const_ops(struct sem_array *sma, int semnum, else pending_list = &sma->sem_base[semnum].pending_const; - walk = pending_list->next; - while (walk != pending_list) { - int error; - - q = container_of(walk, struct sem_queue, list); - walk = walk->next; - - error = perform_atomic_semop(sma, q); - - if (error <= 0) { - /* operation completed, remove from queue & wakeup */ + list_for_each_entry_safe(q, tmp, pending_list, list) { + int error = perform_atomic_semop(sma, q); - unlink_queue(sma, q); + if (error > 0) + continue; + /* operation completed, remove from queue & wakeup */ + unlink_queue(sma, q); - wake_up_sem_queue_prepare(q, error, wake_q); - if (error == 0) - semop_completed = 1; - } + wake_up_sem_queue_prepare(q, error, wake_q); + if (error == 0) + semop_completed = 1; } + return semop_completed; } @@ -914,8 +907,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, */ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q) { - struct sem_queue *q; - struct list_head *walk; + struct sem_queue *q, *tmp; struct list_head *pending_list; int semop_completed = 0; @@ -925,13 +917,9 @@ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *w pending_list = &sma->sem_base[semnum].pending_alter; again: - walk = pending_list->next; - while (walk != pending_list) { + list_for_each_entry_safe(q, tmp, pending_list, list) { int error, restart; - q = container_of(walk, struct sem_queue, list); - walk = walk->next; - /* If we are scanning the single sop, per-semaphore list of * one semaphore and that semaphore is 0, then it is not * necessary to scan further: simple increments -- cgit v1.2.3 From b5fa01a22e4ba9e3ca6de7cb94c3d21e42da449c Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:46 -0800 Subject: ipc/sem: simplify wait-wake loop Instead of using the reverse goto, we can simplify the flow and make it read more naturally by just using a do-while instead. One would hope this is the standard way (or obviously just with a while loop) that we do wait/wakeup handling in the kernel. The exact same logic is kept, just more indented.
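In outline, the converted sleep path has this shape (a condensed sketch of the diff that follows, with error handling elided):

	do {
		queue.status = -EINTR;
		queue.sleeper = current;

		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		schedule();

		error = READ_ONCE(queue.status);
		if (error != -EINTR)
			goto out_free;		/* fastpath: the semop completed */

		rcu_read_lock();
		sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
		error = READ_ONCE(queue.status);
		/* ... re-check under sem_lock ... */
	} while (error == -EINTR && !signal_pending(current));	/* spurious wakeup: sleep again */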
[akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/1478708774-28826-2-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 108 ++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 52 insertions(+), 56 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 4f5af6e7d630..a82c88d0900f 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1963,71 +1963,67 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, sma->complex_count++; } -sleep_again: - queue.status = -EINTR; - queue.sleeper = current; + do { + queue.status = -EINTR; + queue.sleeper = current; - __set_current_state(TASK_INTERRUPTIBLE); - sem_unlock(sma, locknum); - rcu_read_unlock(); + __set_current_state(TASK_INTERRUPTIBLE); + sem_unlock(sma, locknum); + rcu_read_unlock(); - if (timeout) - jiffies_left = schedule_timeout(jiffies_left); - else - schedule(); + if (timeout) + jiffies_left = schedule_timeout(jiffies_left); + else + schedule(); - /* - * fastpath: the semop has completed, either successfully or not, from - * the syscall pov, is quite irrelevant to us at this point; we're done. - * - * We _do_ care, nonetheless, about being awoken by a signal or - * spuriously. The queue.status is checked again in the slowpath (aka - * after taking sem_lock), such that we can detect scenarios where we - * were awakened externally, during the window between wake_q_add() and - * wake_up_q(). - */ - error = READ_ONCE(queue.status); - if (error != -EINTR) { /* - * User space could assume that semop() is a memory barrier: - * Without the mb(), the cpu could speculatively read in user - * space stale data that was overwritten by the previous owner - * of the semaphore. + * fastpath: the semop has completed, either successfully or + * not, from the syscall pov, is quite irrelevant to us at this + * point; we're done. + * + * We _do_ care, nonetheless, about being awoken by a signal or + * spuriously. The queue.status is checked again in the + * slowpath (aka after taking sem_lock), such that we can detect + * scenarios where we were awakened externally, during the + * window between wake_q_add() and wake_up_q(). */ - smp_mb(); - goto out_free; - } - - rcu_read_lock(); - sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); - error = READ_ONCE(queue.status); + error = READ_ONCE(queue.status); + if (error != -EINTR) { + /* + * User space could assume that semop() is a memory + * barrier: Without the mb(), the cpu could + * speculatively read in userspace stale data that was + * overwritten by the previous owner of the semaphore. + */ + smp_mb(); + goto out_free; + } - /* - * Array removed? If yes, leave without sem_unlock(). - */ - if (IS_ERR(sma)) { - rcu_read_unlock(); - goto out_free; - } + rcu_read_lock(); + sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); + error = READ_ONCE(queue.status); - /* - * If queue.status != -EINTR we are woken up by another process. - * Leave without unlink_queue(), but with sem_unlock(). - */ - if (error != -EINTR) - goto out_unlock_free; + /* + * Array removed? If yes, leave without sem_unlock(). + */ + if (IS_ERR(sma)) { + rcu_read_unlock(); + goto out_free; + } - /* - * If an interrupt occurred we have to clean up the queue. - */ - if (timeout && jiffies_left == 0) - error = -EAGAIN; + /* + * If queue.status != -EINTR we are woken up by another process. + * Leave without unlink_queue(), but with sem_unlock(). 
+ */ + if (error != -EINTR) + goto out_unlock_free; - /* - * If an interrupt occurred we have to clean up the queue. - */ - if (timeout && jiffies_left == 0) - error = -EAGAIN; - /* - * If the wakeup was spurious, just retry. - */ - if (error == -EINTR && !signal_pending(current)) - goto sleep_again; + /* + * If an interrupt occurred we have to clean up the queue. + */ + if (timeout && jiffies_left == 0) + error = -EAGAIN; + } while (error == -EINTR && !signal_pending(current)); /* spurious */ unlink_queue(sma, &queue); -- cgit v1.2.3 From 370b262c896e5565b271a3ea3abee4d0914ba443 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 14 Dec 2016 15:06:49 -0800 Subject: ipc/sem: avoid idr tree lookup for interrupted semop We can avoid the idr tree lookup (albeit possibly avoiding idr_find_fast()) when being awoken with -EINTR, as the semid will not change in this context while blocked. Use the sma pointer directly and take the sem_lock, then re-check for RMID races. We continue to re-check the queue.status with the lock held such that we can detect situations where we are dealing with a spurious wakeup but another task that holds the sem_lock updated the queue.status while we were spinning for it. Once we take the lock it obviously won't change again. Being the only caller, get rid of sem_obtain_lock() altogether. Link: http://lkml.kernel.org/r/1478708774-28826-3-git-send-email-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 37 +++++-------------------------------- 1 file changed, 5 insertions(+), 32 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index a82c88d0900f..e08b94851922 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -414,29 +414,6 @@ static inline void sem_unlock(struct sem_array *sma, int locknum) * * The caller holds the RCU read lock. */ -static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, - int id, struct sembuf *sops, int nsops, int *locknum) -{ - struct kern_ipc_perm *ipcp; - struct sem_array *sma; - - ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); - if (IS_ERR(ipcp)) - return ERR_CAST(ipcp); - - sma = container_of(ipcp, struct sem_array, sem_perm); - *locknum = sem_lock(sma, sops, nsops); - - /* ipc_rmid() may have already freed the ID while sem_lock - * was spinning: verify that the structure is still valid - */ - if (ipc_valid_object(ipcp)) - return container_of(ipcp, struct sem_array, sem_perm); - - sem_unlock(sma, *locknum); - return ERR_PTR(-EINVAL); -} - static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); @@ -2000,16 +1977,12 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, } rcu_read_lock(); - sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); - error = READ_ONCE(queue.status); + sem_lock(sma, sops, nsops); - /* - * Array removed? If yes, leave without sem_unlock(). - */ - if (IS_ERR(sma)) { - rcu_read_unlock(); - goto out_free; - } + if (!ipc_valid_object(&sma->sem_perm)) + goto out_unlock_free; + + error = READ_ONCE(queue.status); /* * If queue.status != -EINTR we are woken up by another process. -- cgit v1.2.3 From 5b56d49fc31dbb0487e14ead790fc81ca9fb2c99 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 14 Dec 2016 15:06:52 -0800 Subject: mm: add locked parameter to get_user_pages_remote() Patch series "mm: unexport __get_user_pages_unlocked()". This patch series continues the cleanup of get_user_pages*() functions taking advantage of the fact we can now pass gup_flags as we please.
It firstly adds an additional 'locked' parameter to get_user_pages_remote() to allow its callers to utilise VM_FAULT_RETRY functionality. This is necessary as the invocation of __get_user_pages_unlocked() in process_vm_rw_single_vec() makes use of this and no other existing higher level function would allow it to do so. Secondly existing callers of __get_user_pages_unlocked() are replaced with the appropriate higher-level replacement - get_user_pages_unlocked() if the current task and memory descriptor are referenced, or get_user_pages_remote() if other task/memory descriptors are referenced (having acquired mmap_sem). This patch (of 2): Add an int *locked parameter to get_user_pages_remote() to allow VM_FAULT_RETRY faulting behaviour similar to get_user_pages_[un]locked(). Taking into account the previous adjustments to get_user_pages*() functions allowing for the passing of gup_flags, we are now in a position where __get_user_pages_unlocked() need only be exported for its ability to allow VM_FAULT_RETRY behaviour; this adjustment allows us to subsequently unexport __get_user_pages_unlocked() as well as allowing for future flexibility in the use of get_user_pages_remote(). [sfr@canb.auug.org.au: merge fix for get_user_pages_remote API change] Link: http://lkml.kernel.org/r/20161122210511.024ec341@canb.auug.org.au Link: http://lkml.kernel.org/r/20161027095141.2569-2-lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: Michal Hocko Cc: Jan Kara Cc: Hugh Dickins Cc: Dave Hansen Cc: Rik van Riel Cc: Mel Gorman Cc: Paolo Bonzini Cc: Radim Krcmar Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +- drivers/infiniband/core/umem_odp.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 2 +- fs/exec.c | 2 +- include/linux/mm.h | 2 +- kernel/events/uprobes.c | 4 ++-- mm/gup.c | 12 ++++++++---- mm/memory.c | 2 +- security/tomoyo/domain.c | 2 +- 10 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 7d066a91d778..6f5dfabb3096 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -759,7 +759,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages( down_read(&mm->mmap_sem); while (pinned < npages) { ret = get_user_pages_remote(task, mm, ptr, npages - pinned, - flags, pvec + pinned, NULL); + flags, pvec + pinned, NULL, NULL); if (ret < 0) break; diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 107ddf51065e..d068af2ec3a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, - pvec + pinned, NULL); + pvec + pinned, NULL, NULL); if (ret < 0) break; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 1f0fe3217f23..6b079a31dced 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, */ npages = get_user_pages_remote(owning_process, owning_mm, user_virt, gup_num_pages, - flags, local_page_list, NULL); + flags, local_page_list, NULL, NULL); up_read(&owning_mm->mmap_sem); if (npages < 0) diff --git a/drivers/vfio/vfio_iommu_type1.c
b/drivers/vfio/vfio_iommu_type1.c index 9815e45c23c4..f3726ba12aa6 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -362,7 +362,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, down_read(&mm->mmap_sem); ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, - NULL); + NULL, NULL); up_read(&mm->mmap_sem); } diff --git a/fs/exec.c b/fs/exec.c index 923c57d96899..eac60886b0ab 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -209,7 +209,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * doing the exec and bprm->mm is the new process's mm. */ ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, - &page, NULL); + &page, NULL, NULL); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index a92c8d73aeaf..cc154454675a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1274,7 +1274,7 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f9ec9add2164..215871bda3a2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -301,7 +301,7 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, retry: /* Read the page with vaddr into memory */ ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, - &vma); + &vma, NULL); if (ret <= 0) return ret; @@ -1712,7 +1712,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * essentially a kernel access to the memory. */ result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, - NULL); + NULL, NULL); if (result < 0) return result; diff --git a/mm/gup.c b/mm/gup.c index e50178c58b97..b64c907aa4f0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -917,6 +917,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. + * @locked: pointer to lock flag indicating whether lock is held and + * subsequently whether VM_FAULT_RETRY functionality can be + * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages @@ -960,10 +963,10 @@ EXPORT_SYMBOL(get_user_pages_unlocked); long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + struct vm_area_struct **vmas, int *locked) { return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, - NULL, false, + locked, true, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); @@ -971,8 +974,9 @@ EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task - * and mm being operated on are the current task's. We also - * obviously don't pass FOLL_REMOTE in here. 
+ * and mm being operated on are the current task's and don't allow + * passing of a locked parameter. We also obviously don't pass + * FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, diff --git a/mm/memory.c b/mm/memory.c index c264f7cd3e47..3a6a1239c42b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3919,7 +3919,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, struct page *page = NULL; ret = get_user_pages_remote(tsk, mm, addr, 1, - gup_flags, &page, &vma); + gup_flags, &page, &vma, NULL); if (ret <= 0) { #ifndef CONFIG_HAVE_IOREMAP_PROT break; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 682b73af7766..838ffa78cfda 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, * the execve(). */ if (get_user_pages_remote(current, bprm->mm, pos, 1, - FOLL_FORCE, &page, NULL) <= 0) + FOLL_FORCE, &page, NULL, NULL) <= 0) return false; #else page = bprm->page[pos / PAGE_SIZE]; -- cgit v1.2.3 From 8b7457ef9a9eb46cd1675d40d8e1fd3c47a38395 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 14 Dec 2016 15:06:55 -0800 Subject: mm: unexport __get_user_pages_unlocked() Unexport the low-level __get_user_pages_unlocked() function and replace invocations with calls to more appropriate higher-level functions. In hva_to_pfn_slow() we are able to replace __get_user_pages_unlocked() with get_user_pages_unlocked() since we can now pass gup_flags. In async_pf_execute() and process_vm_rw_single_vec() we need to pass different tsk, mm arguments so get_user_pages_remote() is the sane replacement in these cases (having added manual acquisition and release of mmap_sem). Additionally get_user_pages_remote() reintroduces use of the FOLL_TOUCH flag. However, this flag was originally silently dropped by commit 1e9877902dc7 ("mm/gup: Introduce get_user_pages_remote()"), so this appears to have been unintentional and reintroducing it is therefore not an issue.
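Condensed from the hunks below, the calling convention that replaces an exported __get_user_pages_unlocked() looks roughly like this for a remote task/mm (a sketch only; gup_remote_sketch and its parameters are invented for illustration, while the gup call itself follows the API added earlier in this series):

	/* Manual mmap_sem handling around get_user_pages_remote(),
	 * standing in for the old __get_user_pages_unlocked() call. */
	static long gup_remote_sketch(struct task_struct *tsk, struct mm_struct *mm,
				      unsigned long addr, struct page **pages)
	{
		int locked = 1;
		long ret;

		down_read(&mm->mmap_sem);
		ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_WRITE,
					    pages, NULL, &locked);
		/* VM_FAULT_RETRY may drop mmap_sem and clear 'locked' */
		if (locked)
			up_read(&mm->mmap_sem);
		return ret;
	}

The "if (locked)" test is the important detail: with the locked parameter the fault path may release mmap_sem on VM_FAULT_RETRY, so the caller must only drop it when it is still held, exactly as the async_pf and process_vm hunks below do.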
[akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20161027095141.2569-3-lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: Michal Hocko Cc: Jan Kara Cc: Hugh Dickins Cc: Dave Hansen Cc: Rik van Riel Cc: Mel Gorman Cc: Paolo Bonzini Cc: Radim Krcmar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 3 --- mm/gup.c | 8 ++++---- mm/nommu.c | 8 ++++---- mm/process_vm_access.c | 12 ++++++++---- virt/kvm/async_pf.c | 10 +++++++--- virt/kvm/kvm_main.c | 5 ++--- 6 files changed, 25 insertions(+), 21 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index cc154454675a..7b2d14ed3815 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1280,9 +1280,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); -long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, int write, diff --git a/mm/gup.c b/mm/gup.c index b64c907aa4f0..55315555489d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked); * caller if required (just like with __get_user_pages). "FOLL_GET" * is set implicitly if "pages" is non-NULL. */ -__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags) +static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, + struct mm_struct *mm, unsigned long start, + unsigned long nr_pages, struct page **pages, + unsigned int gup_flags) { long ret; int locked = 1; @@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m up_read(&mm->mmap_sem); return ret; } -EXPORT_SYMBOL(__get_user_pages_unlocked); /* * get_user_pages_unlocked() is suitable to replace the form: diff --git a/mm/nommu.c b/mm/nommu.c index 9720e0bab029..c299a7fbca70 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, } EXPORT_SYMBOL(get_user_pages_locked); -long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - struct page **pages, unsigned int gup_flags) +static long __get_user_pages_unlocked(struct task_struct *tsk, + struct mm_struct *mm, unsigned long start, + unsigned long nr_pages, struct page **pages, + unsigned int gup_flags) { long ret; down_read(&mm->mmap_sem); @@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, up_read(&mm->mmap_sem); return ret; } -EXPORT_SYMBOL(__get_user_pages_unlocked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index be8dc8d1edb9..84d0c7eada2b 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr, ssize_t rc = 0; unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES / sizeof(struct pages *); - unsigned 
int flags = FOLL_REMOTE; + unsigned int flags = 0; /* Work out address and page range required */ if (len == 0) @@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr, while (!rc && nr_pages && iov_iter_count(iter)) { int pages = min(nr_pages, max_pages_per_loop); + int locked = 1; size_t bytes; /* * Get the pages we're interested in. We must - * add FOLL_REMOTE because task/mm might not + * access remotely because task/mm might not * current/current->mm */ - pages = __get_user_pages_unlocked(task, mm, pa, pages, - process_pages, flags); + down_read(&mm->mmap_sem); + pages = get_user_pages_remote(task, mm, pa, pages, flags, + process_pages, NULL, &locked); + if (locked) + up_read(&mm->mmap_sem); if (pages <= 0) return -EFAULT; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index efeceb0a222d..3815e940fbea 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work) struct kvm_vcpu *vcpu = apf->vcpu; unsigned long addr = apf->addr; gva_t gva = apf->gva; + int locked = 1; might_sleep(); /* * This work is run asynchromously to the task which owns * mm and might be done in another context, so we must - * use FOLL_REMOTE. + * access remotely. */ - __get_user_pages_unlocked(NULL, mm, addr, 1, NULL, - FOLL_WRITE | FOLL_REMOTE); + down_read(&mm->mmap_sem); + get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL, + &locked); + if (locked) + up_read(&mm->mmap_sem); kvm_async_page_present_sync(vcpu, apf); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 823544c166be..de102cae7125 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1418,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, npages = get_user_page_nowait(addr, write_fault, page); up_read(&current->mm->mmap_sem); } else { - unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON; + unsigned int flags = FOLL_HWPOISON; if (write_fault) flags |= FOLL_WRITE; - npages = __get_user_pages_unlocked(current, current->mm, addr, 1, - page, flags); + npages = get_user_pages_unlocked(addr, 1, page, flags); } if (npages != 1) return npages; -- cgit v1.2.3 From 82b0f8c39a3869b6fd2a10e180a862248736ec6f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:06:58 -0800 Subject: mm: join struct fault_env and vm_fault Currently we have two different structures for passing fault information around - struct vm_fault and struct fault_env. DAX will need more information in struct vm_fault to handle its faults so the content of that structure would become even closer to fault_env. Furthermore it would need to generate struct fault_env to be able to call some of the generic functions. So at this point I don't think there's much use in keeping these two structures separate. Just embed into struct vm_fault all that is needed to use it for both purposes. Link: http://lkml.kernel.org/r/1479460644-25076-2-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Acked-by: Kirill A.
Shutemov Cc: Ross Zwisler Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 2 +- fs/userfaultfd.c | 22 +- include/linux/huge_mm.h | 10 +- include/linux/mm.h | 28 +- include/linux/userfaultfd_k.h | 4 +- mm/filemap.c | 14 +- mm/huge_memory.c | 173 ++++++------ mm/internal.h | 2 +- mm/khugepaged.c | 20 +- mm/memory.c | 568 +++++++++++++++++++------------------- mm/nommu.c | 2 +- 11 files changed, 423 insertions(+), 422 deletions(-) diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 1b5f15653b1b..69e2387ca278 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must not block. If it's not possible to reach a page without blocking, filesystem should skip it. Filesystem should use do_set_pte() to setup page table entry. Pointer to entry associated with the page is passed in -"pte" field in fault_env structure. Pointers to entries for other offsets +"pte" field in vm_fault structure. Pointers to entries for other offsets should be calculated relative to "pte". ->page_mkwrite() is called when a previously read-only pte is diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 85959d8324df..d96e2f30084b 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -257,9 +257,9 @@ out: * fatal_signal_pending()s, and the mmap_sem must be released before * returning it. */ -int handle_userfault(struct fault_env *fe, unsigned long reason) +int handle_userfault(struct vm_fault *vmf, unsigned long reason) { - struct mm_struct *mm = fe->vma->vm_mm; + struct mm_struct *mm = vmf->vma->vm_mm; struct userfaultfd_ctx *ctx; struct userfaultfd_wait_queue uwq; int ret; @@ -268,7 +268,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); ret = VM_FAULT_SIGBUS; - ctx = fe->vma->vm_userfaultfd_ctx.ctx; + ctx = vmf->vma->vm_userfaultfd_ctx.ctx; if (!ctx) goto out; @@ -301,17 +301,18 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) * without first stopping userland access to the memory. For * VM_UFFD_MISSING userfaults this is enough for now. */ - if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) { + if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) { /* * Validate the invariant that nowait must allow retry * to be sure not to return SIGBUS erroneously on * nowait invocations. */ - BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT); + BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT); #ifdef CONFIG_DEBUG_VM if (printk_ratelimit()) { printk(KERN_WARNING - "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags); + "FAULT_FLAG_ALLOW_RETRY missing %x\n", + vmf->flags); dump_stack(); } #endif @@ -323,7 +324,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) * and wait. 
*/ ret = VM_FAULT_RETRY; - if (fe->flags & FAULT_FLAG_RETRY_NOWAIT) + if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) goto out; /* take the reference before dropping the mmap_sem */ @@ -331,11 +332,11 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); uwq.wq.private = current; - uwq.msg = userfault_msg(fe->address, fe->flags, reason); + uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); uwq.ctx = ctx; return_to_userland = - (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == + (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); spin_lock(&ctx->fault_pending_wqh.lock); @@ -353,7 +354,8 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) TASK_KILLABLE); spin_unlock(&ctx->fault_pending_wqh.lock); - must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); + must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, + reason); up_read(&mm->mmap_sem); if (likely(must_wait && !ACCESS_ONCE(ctx->released) && diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 1f782aa1d8e6..97e478d6b690 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,12 +1,12 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct fault_env *fe); +extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); -extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, @@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -212,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) +static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 7b2d14ed3815..de5bcead2511 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -292,10 +292,16 @@ extern pgprot_t protection_map[16]; * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { + struct vm_area_struct *vma; /* Target VMA */ unsigned int flags; /* FAULT_FLAG_xxx flags */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ - void __user *virtual_address; /* Faulting virtual address */ + unsigned long address; /* Faulting virtual address */ + void __user *virtual_address; /* Faulting virtual address masked by + * PAGE_MASK */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' + */ struct page *cow_page; /* Handler may choose to COW */ struct page *page; /* ->fault handlers should return a @@ -309,19 +315,7 @@ struct vm_fault { * VM_FAULT_DAX_LOCKED and fill in * entry here. 
*/ -}; - -/* - * Page fault context: passes though page fault handler instead of endless list - * of function arguments. - */ -struct fault_env { - struct vm_area_struct *vma; /* Target VMA */ - unsigned long address; /* Faulting virtual address */ - unsigned int flags; /* FAULT_FLAG_xxx flags */ - pmd_t *pmd; /* Pointer to pmd entry matching - * the 'address' - */ + /* These three entries are valid only while holding ptl lock */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page * table hasn't been allocated. @@ -351,7 +345,7 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct fault_env *fe, + void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become @@ -625,7 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, +int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page); #endif @@ -2094,7 +2088,7 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct fault_env *fe, +extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index dd66a952e8cd..11b92b047a1e 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -27,7 +27,7 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int handle_userfault(struct fault_env *fe, unsigned long reason); +extern int handle_userfault(struct vm_fault *vmf, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len); @@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline int handle_userfault(struct fault_env *fe, unsigned long reason) +static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason) { return VM_FAULT_SIGBUS; } diff --git a/mm/filemap.c b/mm/filemap.c index 69568388c699..235021e361eb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2164,12 +2164,12 @@ page_not_uptodate: } EXPORT_SYMBOL(filemap_fault); -void filemap_map_pages(struct fault_env *fe, +void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) { struct radix_tree_iter iter; void **slot; - struct file *file = fe->vma->vm_file; + struct file *file = vmf->vma->vm_file; struct address_space *mapping = file->f_mapping; pgoff_t last_pgoff = start_pgoff; loff_t size; @@ -2225,11 +2225,11 @@ repeat: if (file->f_ra.mmap_miss > 0) file->f_ra.mmap_miss--; - fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; - if (fe->pte) - fe->pte += iter.index - last_pgoff; + vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; + if (vmf->pte) + vmf->pte += iter.index - last_pgoff; last_pgoff = iter.index; - if (alloc_set_pte(fe, NULL, page)) + if (alloc_set_pte(vmf, NULL, page)) goto unlock; unlock_page(page); 
goto next; @@ -2239,7 +2239,7 @@ skip: put_page(page); next: /* Huge page is mapped? No need to proceed. */ - if (pmd_trans_huge(*fe->pmd)) + if (pmd_trans_huge(*vmf->pmd)) break; if (iter.index == end_pgoff) break; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cee42cf05477..10eedbf14421 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -542,13 +542,13 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); -static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, +static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, gfp_t gfp) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct mem_cgroup *memcg; pgtable_t pgtable; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -573,9 +573,9 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, */ __SetPageUptodate(page); - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); - if (unlikely(!pmd_none(*fe->pmd))) { - spin_unlock(fe->ptl); + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_none(*vmf->pmd))) { + spin_unlock(vmf->ptl); mem_cgroup_cancel_charge(page, memcg, true); put_page(page); pte_free(vma->vm_mm, pgtable); @@ -586,11 +586,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, if (userfaultfd_missing(vma)) { int ret; - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); mem_cgroup_cancel_charge(page, memcg, true); put_page(page); pte_free(vma->vm_mm, pgtable); - ret = handle_userfault(fe, VM_UFFD_MISSING); + ret = handle_userfault(vmf, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); return ret; } @@ -600,11 +600,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, page_add_new_anon_rmap(page, vma, haddr, true); mem_cgroup_commit_charge(page, memcg, false, true); lru_cache_add_active_or_unevictable(page, vma); - pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable); - set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); + pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); atomic_long_inc(&vma->vm_mm->nr_ptes); - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); count_vm_event(THP_FAULT_ALLOC); } @@ -651,12 +651,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, return true; } -int do_huge_pmd_anonymous_page(struct fault_env *fe) +int do_huge_pmd_anonymous_page(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; gfp_t gfp; struct page *page; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) return VM_FAULT_FALLBACK; @@ -664,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) return VM_FAULT_OOM; if (unlikely(khugepaged_enter(vma, vma->vm_flags))) return VM_FAULT_OOM; - if (!(fe->flags & FAULT_FLAG_WRITE) && + if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm) && transparent_hugepage_use_zero_page()) { pgtable_t pgtable; @@ -680,22 +680,22 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + vmf->ptl = 
pmd_lock(vma->vm_mm, vmf->pmd); ret = 0; set = false; - if (pmd_none(*fe->pmd)) { + if (pmd_none(*vmf->pmd)) { if (userfaultfd_missing(vma)) { - spin_unlock(fe->ptl); - ret = handle_userfault(fe, VM_UFFD_MISSING); + spin_unlock(vmf->ptl); + ret = handle_userfault(vmf, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); } else { set_huge_zero_page(pgtable, vma->vm_mm, vma, - haddr, fe->pmd, zero_page); - spin_unlock(fe->ptl); + haddr, vmf->pmd, zero_page); + spin_unlock(vmf->ptl); set = true; } } else - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); if (!set) pte_free(vma->vm_mm, pgtable); return ret; @@ -707,7 +707,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) return VM_FAULT_FALLBACK; } prep_transhuge_page(page); - return __do_huge_pmd_anonymous_page(fe, page, gfp); + return __do_huge_pmd_anonymous_page(vmf, page, gfp); } static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -879,30 +879,30 @@ out: return ret; } -void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd) +void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd) { pmd_t entry; unsigned long haddr; - fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd); - if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) + vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) goto unlock; entry = pmd_mkyoung(orig_pmd); - haddr = fe->address & HPAGE_PMD_MASK; - if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, - fe->flags & FAULT_FLAG_WRITE)) - update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd); + haddr = vmf->address & HPAGE_PMD_MASK; + if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, + vmf->flags & FAULT_FLAG_WRITE)) + update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); unlock: - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); } -static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, +static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, struct page *page) { - struct vm_area_struct *vma = fe->vma; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + struct vm_area_struct *vma = vmf->vma; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; struct mem_cgroup *memcg; pgtable_t pgtable; pmd_t _pmd; @@ -921,7 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, for (i = 0; i < HPAGE_PMD_NR; i++) { pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | __GFP_OTHER_NODE, vma, - fe->address, page_to_nid(page)); + vmf->address, page_to_nid(page)); if (unlikely(!pages[i] || mem_cgroup_try_charge(pages[i], vma->vm_mm, GFP_KERNEL, &memcg, false))) { @@ -952,15 +952,15 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, mmun_end = haddr + HPAGE_PMD_SIZE; mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); - if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) goto out_free_pages; VM_BUG_ON_PAGE(!PageHead(page), page); - pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); + pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); /* leave pmd empty until pte is filled */ - pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd); + pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); pmd_populate(vma->vm_mm, &_pmd, pgtable); for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { @@ -969,20 +969,20 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, entry = 
maybe_mkwrite(pte_mkdirty(entry), vma); memcg = (void *)page_private(pages[i]); set_page_private(pages[i], 0); - page_add_new_anon_rmap(pages[i], fe->vma, haddr, false); + page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); mem_cgroup_commit_charge(pages[i], memcg, false, false); lru_cache_add_active_or_unevictable(pages[i], vma); - fe->pte = pte_offset_map(&_pmd, haddr); - VM_BUG_ON(!pte_none(*fe->pte)); - set_pte_at(vma->vm_mm, haddr, fe->pte, entry); - pte_unmap(fe->pte); + vmf->pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte_none(*vmf->pte)); + set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); + pte_unmap(vmf->pte); } kfree(pages); smp_wmb(); /* make pte visible before pmd */ - pmd_populate(vma->vm_mm, fe->pmd, pgtable); + pmd_populate(vma->vm_mm, vmf->pmd, pgtable); page_remove_rmap(page, true); - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); @@ -993,7 +993,7 @@ out: return ret; out_free_pages: - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); for (i = 0; i < HPAGE_PMD_NR; i++) { memcg = (void *)page_private(pages[i]); @@ -1005,23 +1005,23 @@ out_free_pages: goto out; } -int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) +int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *page = NULL, *new_page; struct mem_cgroup *memcg; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ gfp_t huge_gfp; /* for allocation and charge */ int ret = 0; - fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd); + vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); VM_BUG_ON_VMA(!vma->anon_vma, vma); if (is_huge_zero_pmd(orig_pmd)) goto alloc; - spin_lock(fe->ptl); - if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) + spin_lock(vmf->ptl); + if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) goto out_unlock; page = pmd_page(orig_pmd); @@ -1034,13 +1034,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) pmd_t entry; entry = pmd_mkyoung(orig_pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) - update_mmu_cache_pmd(vma, fe->address, fe->pmd); + if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) + update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); ret |= VM_FAULT_WRITE; goto out_unlock; } get_page(page); - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); alloc: if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) { @@ -1053,12 +1053,12 @@ alloc: prep_transhuge_page(new_page); } else { if (!page) { - split_huge_pmd(vma, fe->pmd, fe->address); + split_huge_pmd(vma, vmf->pmd, vmf->address); ret |= VM_FAULT_FALLBACK; } else { - ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page); + ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page); if (ret & VM_FAULT_OOM) { - split_huge_pmd(vma, fe->pmd, fe->address); + split_huge_pmd(vma, vmf->pmd, vmf->address); ret |= VM_FAULT_FALLBACK; } put_page(page); @@ -1070,7 +1070,7 @@ alloc: if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, huge_gfp, &memcg, true))) { put_page(new_page); - split_huge_pmd(vma, fe->pmd, fe->address); + split_huge_pmd(vma, vmf->pmd, vmf->address); if (page) put_page(page); ret |= VM_FAULT_FALLBACK; @@ -1090,11 +1090,11 @@ alloc: mmun_end = haddr + 
HPAGE_PMD_SIZE; mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); - spin_lock(fe->ptl); + spin_lock(vmf->ptl); if (page) put_page(page); - if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { - spin_unlock(fe->ptl); + if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { + spin_unlock(vmf->ptl); mem_cgroup_cancel_charge(new_page, memcg, true); put_page(new_page); goto out_mn; @@ -1102,12 +1102,12 @@ alloc: pmd_t entry; entry = mk_huge_pmd(new_page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); + pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); page_add_new_anon_rmap(new_page, vma, haddr, true); mem_cgroup_commit_charge(new_page, memcg, false, true); lru_cache_add_active_or_unevictable(new_page, vma); - set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); - update_mmu_cache_pmd(vma, fe->address, fe->pmd); + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); + update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); if (!page) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); } else { @@ -1117,13 +1117,13 @@ alloc: } ret |= VM_FAULT_WRITE; } - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); out_mn: mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); out: return ret; out_unlock: - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); return ret; } @@ -1196,12 +1196,12 @@ out: } /* NUMA hinting page fault entry point for trans huge pmds */ -int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) +int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct anon_vma *anon_vma = NULL; struct page *page; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; int page_nid = -1, this_nid = numa_node_id(); int target_nid, last_cpupid = -1; bool page_locked; @@ -1209,8 +1209,8 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) bool was_writable; int flags = 0; - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); - if (unlikely(!pmd_same(pmd, *fe->pmd))) + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_same(pmd, *vmf->pmd))) goto out_unlock; /* @@ -1218,9 +1218,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) * without disrupting NUMA hinting information. Do not relock and * check_same as the page may no longer be mapped. 
*/ - if (unlikely(pmd_trans_migrating(*fe->pmd))) { - page = pmd_page(*fe->pmd); - spin_unlock(fe->ptl); + if (unlikely(pmd_trans_migrating(*vmf->pmd))) { + page = pmd_page(*vmf->pmd); + spin_unlock(vmf->ptl); wait_on_page_locked(page); goto out; } @@ -1253,7 +1253,7 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); wait_on_page_locked(page); page_nid = -1; goto out; @@ -1264,12 +1264,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) * to serialises splits */ get_page(page); - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); anon_vma = page_lock_anon_vma_read(page); /* Confirm the PMD did not change while page_table_lock was released */ - spin_lock(fe->ptl); - if (unlikely(!pmd_same(pmd, *fe->pmd))) { + spin_lock(vmf->ptl); + if (unlikely(!pmd_same(pmd, *vmf->pmd))) { unlock_page(page); put_page(page); page_nid = -1; @@ -1287,9 +1287,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) * Migrate the THP to the requested node, returns with page unlocked * and access rights restored. */ - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, - fe->pmd, pmd, fe->address, page, target_nid); + vmf->pmd, pmd, vmf->address, page, target_nid); if (migrated) { flags |= TNF_MIGRATED; page_nid = target_nid; @@ -1304,18 +1304,19 @@ clear_pmdnuma: pmd = pmd_mkyoung(pmd); if (was_writable) pmd = pmd_mkwrite(pmd); - set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd); - update_mmu_cache_pmd(vma, fe->address, fe->pmd); + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); + update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); unlock_page(page); out_unlock: - spin_unlock(fe->ptl); + spin_unlock(vmf->ptl); out: if (anon_vma) page_unlock_anon_vma_read(anon_vma); if (page_nid != -1) - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags); + task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, + vmf->flags); return 0; } diff --git a/mm/internal.h b/mm/internal.h index 537ac9951f5f..093b1eacc91b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,7 +36,7 @@ /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) -int do_swap_page(struct fault_env *fe, pte_t orig_pte); +int do_swap_page(struct vm_fault *vmf, pte_t orig_pte); void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 09460955e818..d950c2509161 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -877,7 +877,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, { pte_t pteval; int swapped_in = 0, ret = 0; - struct fault_env fe = { + struct vm_fault vmf = { .vma = vma, .address = address, .flags = FAULT_FLAG_ALLOW_RETRY, @@ -889,19 +889,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; } - fe.pte = pte_offset_map(pmd, address); - for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; - fe.pte++, fe.address += PAGE_SIZE) { - pteval = *fe.pte; + vmf.pte = pte_offset_map(pmd, address); + for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE; + vmf.pte++, vmf.address += PAGE_SIZE) { + pteval = *vmf.pte; if (!is_swap_pte(pteval)) continue; swapped_in++; - ret = do_swap_page(&fe, pteval); + ret = do_swap_page(&vmf, pteval); /* do_swap_page returns 
VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); - if (hugepage_vma_revalidate(mm, address, &fe.vma)) { + if (hugepage_vma_revalidate(mm, address, &vmf.vma)) { /* vma is no longer available, don't continue to swapin */ trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; @@ -915,10 +915,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, return false; } /* pte is unmapped now, we need to map it */ - fe.pte = pte_offset_map(pmd, fe.address); + vmf.pte = pte_offset_map(pmd, vmf.address); } - fe.pte--; - pte_unmap(fe.pte); + vmf.pte--; + pte_unmap(vmf.pte); trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); return true; } diff --git a/mm/memory.c b/mm/memory.c index 3a6a1239c42b..512e1c359193 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2070,11 +2070,11 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte, +static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte, struct page *page, int page_mkwrite, int dirty_shared) - __releases(fe->ptl) + __releases(vmf->ptl) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; pte_t entry; /* * Clear the pages cpupid information as the existing @@ -2084,12 +2084,12 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte, if (page) page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); - flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); + flush_cache_page(vma, vmf->address, pte_pfn(orig_pte)); entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); - if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1)) - update_mmu_cache(vma, fe->address, fe->pte); - pte_unmap_unlock(fe->pte, fe->ptl); + if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) + update_mmu_cache(vma, vmf->address, vmf->pte); + pte_unmap_unlock(vmf->pte, vmf->ptl); if (dirty_shared) { struct address_space *mapping; @@ -2135,15 +2135,15 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte, * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. 
*/ -static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, +static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte, struct page *old_page) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; struct page *new_page = NULL; pte_t entry; int page_copied = 0; - const unsigned long mmun_start = fe->address & PAGE_MASK; + const unsigned long mmun_start = vmf->address & PAGE_MASK; const unsigned long mmun_end = mmun_start + PAGE_SIZE; struct mem_cgroup *memcg; @@ -2151,15 +2151,16 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, goto oom; if (is_zero_pfn(pte_pfn(orig_pte))) { - new_page = alloc_zeroed_user_highpage_movable(vma, fe->address); + new_page = alloc_zeroed_user_highpage_movable(vma, + vmf->address); if (!new_page) goto oom; } else { new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, - fe->address); + vmf->address); if (!new_page) goto oom; - cow_user_page(new_page, old_page, fe->address, vma); + cow_user_page(new_page, old_page, vmf->address, vma); } if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) @@ -2172,8 +2173,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, /* * Re-check the pte - we dropped the lock */ - fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl); - if (likely(pte_same(*fe->pte, orig_pte))) { + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); + if (likely(pte_same(*vmf->pte, orig_pte))) { if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter_fast(mm, @@ -2183,7 +2184,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, } else { inc_mm_counter_fast(mm, MM_ANONPAGES); } - flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); + flush_cache_page(vma, vmf->address, pte_pfn(orig_pte)); entry = mk_pte(new_page, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); /* @@ -2192,8 +2193,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, * seen in the presence of one thread doing SMC and another * thread doing COW. */ - ptep_clear_flush_notify(vma, fe->address, fe->pte); - page_add_new_anon_rmap(new_page, vma, fe->address, false); + ptep_clear_flush_notify(vma, vmf->address, vmf->pte); + page_add_new_anon_rmap(new_page, vma, vmf->address, false); mem_cgroup_commit_charge(new_page, memcg, false, false); lru_cache_add_active_or_unevictable(new_page, vma); /* @@ -2201,8 +2202,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, * mmu page tables (such as kvm shadow page tables), we want the * new page to be mapped directly into the secondary page table. 
*/ - set_pte_at_notify(mm, fe->address, fe->pte, entry); - update_mmu_cache(vma, fe->address, fe->pte); + set_pte_at_notify(mm, vmf->address, vmf->pte, entry); + update_mmu_cache(vma, vmf->address, vmf->pte); if (old_page) { /* * Only after switching the pte to the new page may @@ -2239,7 +2240,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, if (new_page) put_page(new_page); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (old_page) { /* @@ -2267,43 +2268,43 @@ oom: * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping */ -static int wp_pfn_shared(struct fault_env *fe, pte_t orig_pte) +static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { - struct vm_fault vmf = { + struct vm_fault vmf2 = { .page = NULL, - .pgoff = linear_page_index(vma, fe->address), + .pgoff = linear_page_index(vma, vmf->address), .virtual_address = - (void __user *)(fe->address & PAGE_MASK), + (void __user *)(vmf->address & PAGE_MASK), .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, }; int ret; - pte_unmap_unlock(fe->pte, fe->ptl); - ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); + pte_unmap_unlock(vmf->pte, vmf->ptl); + ret = vma->vm_ops->pfn_mkwrite(vma, &vmf2); if (ret & VM_FAULT_ERROR) return ret; - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); /* * We might have raced with another page fault while we * released the pte_offset_map_lock. */ - if (!pte_same(*fe->pte, orig_pte)) { - pte_unmap_unlock(fe->pte, fe->ptl); + if (!pte_same(*vmf->pte, orig_pte)) { + pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } } - return wp_page_reuse(fe, orig_pte, NULL, 0, 0); + return wp_page_reuse(vmf, orig_pte, NULL, 0, 0); } -static int wp_page_shared(struct fault_env *fe, pte_t orig_pte, +static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte, struct page *old_page) - __releases(fe->ptl) + __releases(vmf->ptl) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; int page_mkwrite = 0; get_page(old_page); @@ -2311,8 +2312,8 @@ static int wp_page_shared(struct fault_env *fe, pte_t orig_pte, if (vma->vm_ops && vma->vm_ops->page_mkwrite) { int tmp; - pte_unmap_unlock(fe->pte, fe->ptl); - tmp = do_page_mkwrite(vma, old_page, fe->address); + pte_unmap_unlock(vmf->pte, vmf->ptl); + tmp = do_page_mkwrite(vma, old_page, vmf->address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(old_page); @@ -2324,18 +2325,18 @@ static int wp_page_shared(struct fault_env *fe, pte_t orig_pte, * they did, we just return, as we can count on the * MMU to tell us if they didn't also make it writable. 
*/ - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (!pte_same(*fe->pte, orig_pte)) { + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (!pte_same(*vmf->pte, orig_pte)) { unlock_page(old_page); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); put_page(old_page); return 0; } page_mkwrite = 1; } - return wp_page_reuse(fe, orig_pte, old_page, page_mkwrite, 1); + return wp_page_reuse(vmf, orig_pte, old_page, page_mkwrite, 1); } /* @@ -2356,13 +2357,13 @@ static int wp_page_shared(struct fault_env *fe, pte_t orig_pte, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ -static int do_wp_page(struct fault_env *fe, pte_t orig_pte) - __releases(fe->ptl) +static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) + __releases(vmf->ptl) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *old_page; - old_page = vm_normal_page(vma, fe->address, orig_pte); + old_page = vm_normal_page(vma, vmf->address, orig_pte); if (!old_page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a @@ -2373,10 +2374,10 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte) */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) - return wp_pfn_shared(fe, orig_pte); + return wp_pfn_shared(vmf, orig_pte); - pte_unmap_unlock(fe->pte, fe->ptl); - return wp_page_copy(fe, orig_pte, old_page); + pte_unmap_unlock(vmf->pte, vmf->ptl); + return wp_page_copy(vmf, orig_pte, old_page); } /* @@ -2387,13 +2388,13 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte) int total_mapcount; if (!trylock_page(old_page)) { get_page(old_page); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); lock_page(old_page); - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, - fe->address, &fe->ptl); - if (!pte_same(*fe->pte, orig_pte)) { + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (!pte_same(*vmf->pte, orig_pte)) { unlock_page(old_page); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); put_page(old_page); return 0; } @@ -2411,12 +2412,12 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte) page_move_anon_rmap(old_page, vma); } unlock_page(old_page); - return wp_page_reuse(fe, orig_pte, old_page, 0, 0); + return wp_page_reuse(vmf, orig_pte, old_page, 0, 0); } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { - return wp_page_shared(fe, orig_pte, old_page); + return wp_page_shared(vmf, orig_pte, old_page); } /* @@ -2424,8 +2425,8 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte) */ get_page(old_page); - pte_unmap_unlock(fe->pte, fe->ptl); - return wp_page_copy(fe, orig_pte, old_page); + pte_unmap_unlock(vmf->pte, vmf->ptl); + return wp_page_copy(vmf, orig_pte, old_page); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, @@ -2513,9 +2514,9 @@ EXPORT_SYMBOL(unmap_mapping_range); * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). 
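Condensed from the do_wp_page() hunks above, the write-protect fault reduces to a small decision tree. This is a sketch with locking and reference counting elided; shared and reuse_ok are shorthand here, not variables from the patch:

    /* do_wp_page(), condensed; "shared" abbreviates
     * (vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED),
     * "reuse_ok" the trylock/mapcount/reuse_swap_page() checks. */
    old_page = vm_normal_page(vma, vmf->address, orig_pte);
    if (!old_page)                          /* VM_PFNMAP/VM_MIXEDMAP pfn */
        return shared ? wp_pfn_shared(vmf, orig_pte)
                      : wp_page_copy(vmf, orig_pte, NULL);
    if (PageAnon(old_page) && !PageKsm(old_page) && reuse_ok)
        return wp_page_reuse(vmf, orig_pte, old_page, 0, 0);
    if (shared)                             /* writable shared file page */
        return wp_page_shared(vmf, orig_pte, old_page);
    return wp_page_copy(vmf, orig_pte, old_page);   /* private: COW */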
*/ -int do_swap_page(struct fault_env *fe, pte_t orig_pte) +int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *page, *swapcache; struct mem_cgroup *memcg; swp_entry_t entry; @@ -2524,17 +2525,18 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) int exclusive = 0; int ret = 0; - if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte)) + if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, orig_pte)) goto out; entry = pte_to_swp_entry(orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { - migration_entry_wait(vma->vm_mm, fe->pmd, fe->address); + migration_entry_wait(vma->vm_mm, vmf->pmd, + vmf->address); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else { - print_bad_pte(vma, fe->address, orig_pte, NULL); + print_bad_pte(vma, vmf->address, orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; @@ -2542,16 +2544,16 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) delayacct_set_flag(DELAYACCT_PF_SWAPIN); page = lookup_swap_cache(entry); if (!page) { - page = swapin_readahead(entry, - GFP_HIGHUSER_MOVABLE, vma, fe->address); + page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, + vmf->address); if (!page) { /* * Back out if somebody else faulted in this pte * while we released the pte lock. */ - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, - fe->address, &fe->ptl); - if (likely(pte_same(*fe->pte, orig_pte))) + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (likely(pte_same(*vmf->pte, orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); goto unlock; @@ -2573,7 +2575,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) } swapcache = page; - locked = lock_page_or_retry(page, vma->vm_mm, fe->flags); + locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); if (!locked) { @@ -2590,7 +2592,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) goto out_page; - page = ksm_might_need_to_copy(page, vma, fe->address); + page = ksm_might_need_to_copy(page, vma, vmf->address); if (unlikely(!page)) { ret = VM_FAULT_OOM; page = swapcache; @@ -2606,9 +2608,9 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) /* * Back out if somebody else already faulted in this pte. 
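For orientation in this long do_swap_page() conversion, the function's shape is as follows (a prose-level outline of the hunks, not new code):

    /* do_swap_page() in outline:
     * 1. pte_to_swp_entry(): migration entries just wait and retry,
     *    hwpoison entries fail with VM_FAULT_HWPOISON, anything else
     *    bogus is print_bad_pte() + VM_FAULT_SIGBUS;
     * 2. lookup_swap_cache(), else swapin_readahead(entry,
     *    GFP_HIGHUSER_MOVABLE, vma, vmf->address);
     * 3. lock_page_or_retry(), re-take vmf->ptl and re-check
     *    pte_same(*vmf->pte, orig_pte);
     * 4. build the new PTE, writable only if reuse_swap_page() says
     *    the page is exclusive, and let do_wp_page(vmf, pte) finish a
     *    write fault that could not be satisfied in place.
     */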
*/ - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (unlikely(!pte_same(*fe->pte, orig_pte))) + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); + if (unlikely(!pte_same(*vmf->pte, orig_pte))) goto out_nomap; if (unlikely(!PageUptodate(page))) { @@ -2629,22 +2631,22 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); pte = mk_pte(page, vma->vm_page_prot); - if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { + if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); - fe->flags &= ~FAULT_FLAG_WRITE; + vmf->flags &= ~FAULT_FLAG_WRITE; ret |= VM_FAULT_WRITE; exclusive = RMAP_EXCLUSIVE; } flush_icache_page(vma, page); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - set_pte_at(vma->vm_mm, fe->address, fe->pte, pte); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); if (page == swapcache) { - do_page_add_anon_rmap(page, vma, fe->address, exclusive); + do_page_add_anon_rmap(page, vma, vmf->address, exclusive); mem_cgroup_commit_charge(page, memcg, true, false); activate_page(page); } else { /* ksm created a completely new copy */ - page_add_new_anon_rmap(page, vma, fe->address, false); + page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); } @@ -2667,22 +2669,22 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) put_page(swapcache); } - if (fe->flags & FAULT_FLAG_WRITE) { - ret |= do_wp_page(fe, pte); + if (vmf->flags & FAULT_FLAG_WRITE) { + ret |= do_wp_page(vmf, pte); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; } /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, fe->address, fe->pte); + update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); out: return ret; out_nomap: mem_cgroup_cancel_charge(page, memcg, false); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); out_page: unlock_page(page); out_release: @@ -2733,9 +2735,9 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ -static int do_anonymous_page(struct fault_env *fe) +static int do_anonymous_page(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct mem_cgroup *memcg; struct page *page; pte_t entry; @@ -2745,7 +2747,7 @@ static int do_anonymous_page(struct fault_env *fe) return VM_FAULT_SIGBUS; /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, fe->address) < 0) + if (check_stack_guard_page(vma, vmf->address) < 0) return VM_FAULT_SIGSEGV; /* @@ -2758,26 +2760,26 @@ static int do_anonymous_page(struct fault_env *fe) * * Here we only have down_read(mmap_sem). 
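One detail of do_anonymous_page() worth isolating: a read fault never allocates memory. The PTE is pointed at the global zero page, and the first write later COWs it through do_wp_page(). The relevant lines, pulled out for emphasis from the hunk that follows:

    /* Read fault on an anonymous VMA: map the zero page, read-only. */
    if (!(vmf->flags & FAULT_FLAG_WRITE) &&
        !mm_forbids_zeropage(vma->vm_mm))
        entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
                                      vma->vm_page_prot));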
*/ - if (pte_alloc(vma->vm_mm, fe->pmd, fe->address)) + if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) return VM_FAULT_OOM; /* See the comment in pte_alloc_one_map() */ - if (unlikely(pmd_trans_unstable(fe->pmd))) + if (unlikely(pmd_trans_unstable(vmf->pmd))) return 0; /* Use the zero-page for reads */ - if (!(fe->flags & FAULT_FLAG_WRITE) && + if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { - entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address), + entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot)); - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (!pte_none(*fe->pte)) + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (!pte_none(*vmf->pte)) goto unlock; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { - pte_unmap_unlock(fe->pte, fe->ptl); - return handle_userfault(fe, VM_UFFD_MISSING); + pte_unmap_unlock(vmf->pte, vmf->ptl); + return handle_userfault(vmf, VM_UFFD_MISSING); } goto setpte; } @@ -2785,7 +2787,7 @@ static int do_anonymous_page(struct fault_env *fe) /* Allocate our own private page. */ if (unlikely(anon_vma_prepare(vma))) goto oom; - page = alloc_zeroed_user_highpage_movable(vma, fe->address); + page = alloc_zeroed_user_highpage_movable(vma, vmf->address); if (!page) goto oom; @@ -2803,30 +2805,30 @@ static int do_anonymous_page(struct fault_env *fe) if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (!pte_none(*fe->pte)) + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); + if (!pte_none(*vmf->pte)) goto release; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); mem_cgroup_cancel_charge(page, memcg, false); put_page(page); - return handle_userfault(fe, VM_UFFD_MISSING); + return handle_userfault(vmf, VM_UFFD_MISSING); } inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, fe->address, false); + page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); setpte: - set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, fe->address, fe->pte); + update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; release: mem_cgroup_cancel_charge(page, memcg, false); @@ -2843,62 +2845,62 @@ oom: * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry(). 
*/ -static int __do_fault(struct fault_env *fe, pgoff_t pgoff, +static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff, struct page *cow_page, struct page **page, void **entry) { - struct vm_area_struct *vma = fe->vma; - struct vm_fault vmf; + struct vm_area_struct *vma = vmf->vma; + struct vm_fault vmf2; int ret; - vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK); - vmf.pgoff = pgoff; - vmf.flags = fe->flags; - vmf.page = NULL; - vmf.gfp_mask = __get_fault_gfp_mask(vma); - vmf.cow_page = cow_page; + vmf2.virtual_address = (void __user *)(vmf->address & PAGE_MASK); + vmf2.pgoff = pgoff; + vmf2.flags = vmf->flags; + vmf2.page = NULL; + vmf2.gfp_mask = __get_fault_gfp_mask(vma); + vmf2.cow_page = cow_page; - ret = vma->vm_ops->fault(vma, &vmf); + ret = vma->vm_ops->fault(vma, &vmf2); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; if (ret & VM_FAULT_DAX_LOCKED) { - *entry = vmf.entry; + *entry = vmf2.entry; return ret; } - if (unlikely(PageHWPoison(vmf.page))) { + if (unlikely(PageHWPoison(vmf2.page))) { if (ret & VM_FAULT_LOCKED) - unlock_page(vmf.page); - put_page(vmf.page); + unlock_page(vmf2.page); + put_page(vmf2.page); return VM_FAULT_HWPOISON; } if (unlikely(!(ret & VM_FAULT_LOCKED))) - lock_page(vmf.page); + lock_page(vmf2.page); else - VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); + VM_BUG_ON_PAGE(!PageLocked(vmf2.page), vmf2.page); - *page = vmf.page; + *page = vmf2.page; return ret; } -static int pte_alloc_one_map(struct fault_env *fe) +static int pte_alloc_one_map(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; - if (!pmd_none(*fe->pmd)) + if (!pmd_none(*vmf->pmd)) goto map_pte; - if (fe->prealloc_pte) { - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); - if (unlikely(!pmd_none(*fe->pmd))) { - spin_unlock(fe->ptl); + if (vmf->prealloc_pte) { + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_none(*vmf->pmd))) { + spin_unlock(vmf->ptl); goto map_pte; } atomic_long_inc(&vma->vm_mm->nr_ptes); - pmd_populate(vma->vm_mm, fe->pmd, fe->prealloc_pte); - spin_unlock(fe->ptl); - fe->prealloc_pte = 0; - } else if (unlikely(pte_alloc(vma->vm_mm, fe->pmd, fe->address))) { + pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); + spin_unlock(vmf->ptl); + vmf->prealloc_pte = 0; + } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { return VM_FAULT_OOM; } map_pte: @@ -2913,11 +2915,11 @@ map_pte: * through an atomic read in C, which is what pmd_trans_unstable() * provides. */ - if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd)) + if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) return VM_FAULT_NOPAGE; - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); return 0; } @@ -2935,24 +2937,24 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, return true; } -static void deposit_prealloc_pte(struct fault_env *fe) +static void deposit_prealloc_pte(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; - pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, fe->prealloc_pte); + pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); /* * We are going to consume the prealloc table, * count that as nr_ptes. 
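The prealloc_pte field renamed throughout these hunks has a well-defined lifecycle, summarized here from the surrounding code (background, not a hunk): architectures such as ppc64 need a backing page-table page even under a huge PMD, so one is allocated ahead of time and parked until it is consumed, deposited, or freed.

    /* Lifecycle of vmf->prealloc_pte in the fault path:
     *   pte_alloc_one()               allocated before taking locks
     *   pmd_populate()                consumed by pte_alloc_one_map()
     *   pgtable_trans_huge_deposit()  stashed under the PMD lock
     *   pgtable_trans_huge_withdraw() taken back on VM_FAULT_FALLBACK
     *   pte_free()                    released if never consumed
     */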
*/ atomic_long_inc(&vma->vm_mm->nr_ptes); - fe->prealloc_pte = 0; + vmf->prealloc_pte = 0; } -static int do_set_pmd(struct fault_env *fe, struct page *page) +static int do_set_pmd(struct vm_fault *vmf, struct page *page) { - struct vm_area_struct *vma = fe->vma; - bool write = fe->flags & FAULT_FLAG_WRITE; - unsigned long haddr = fe->address & HPAGE_PMD_MASK; + struct vm_area_struct *vma = vmf->vma; + bool write = vmf->flags & FAULT_FLAG_WRITE; + unsigned long haddr = vmf->address & HPAGE_PMD_MASK; pmd_t entry; int i, ret; @@ -2966,15 +2968,15 @@ static int do_set_pmd(struct fault_env *fe, struct page *page) * Archs like ppc64 need additonal space to store information * related to pte entry. Use the preallocated table for that. */ - if (arch_needs_pgtable_deposit() && !fe->prealloc_pte) { - fe->prealloc_pte = pte_alloc_one(vma->vm_mm, fe->address); - if (!fe->prealloc_pte) + if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { + vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); + if (!vmf->prealloc_pte) return VM_FAULT_OOM; smp_wmb(); /* See comment in __pte_alloc() */ } - fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); - if (unlikely(!pmd_none(*fe->pmd))) + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (unlikely(!pmd_none(*vmf->pmd))) goto out; for (i = 0; i < HPAGE_PMD_NR; i++) @@ -2990,11 +2992,11 @@ static int do_set_pmd(struct fault_env *fe, struct page *page) * deposit and withdraw with pmd lock held */ if (arch_needs_pgtable_deposit()) - deposit_prealloc_pte(fe); + deposit_prealloc_pte(vmf); - set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); - update_mmu_cache_pmd(vma, haddr, fe->pmd); + update_mmu_cache_pmd(vma, haddr, vmf->pmd); /* fault is handled */ ret = 0; @@ -3005,13 +3007,13 @@ out: * withdraw with pmd lock held. */ if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK) - fe->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm, - fe->pmd); - spin_unlock(fe->ptl); + vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm, + vmf->pmd); + spin_unlock(vmf->ptl); return ret; } #else -static int do_set_pmd(struct fault_env *fe, struct page *page) +static int do_set_pmd(struct vm_fault *vmf, struct page *page) { BUILD_BUG(); return 0; @@ -3022,41 +3024,42 @@ static int do_set_pmd(struct fault_env *fe, struct page *page) * alloc_set_pte - setup new PTE entry for given page and add reverse page * mapping. If needed, the fucntion allocates page table or use pre-allocated. * - * @fe: fault environment + * @vmf: fault environment * @memcg: memcg to charge page (only for private mappings) * @page: page to map * - * Caller must take care of unlocking fe->ptl, if fe->pte is non-NULL on return. + * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on + * return. * * Target users are page handler itself and implementations of * vm_ops->map_pages. */ -int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, +int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page) { - struct vm_area_struct *vma = fe->vma; - bool write = fe->flags & FAULT_FLAG_WRITE; + struct vm_area_struct *vma = vmf->vma; + bool write = vmf->flags & FAULT_FLAG_WRITE; pte_t entry; int ret; - if (pmd_none(*fe->pmd) && PageTransCompound(page) && + if (pmd_none(*vmf->pmd) && PageTransCompound(page) && IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { /* THP on COW? 
*/ VM_BUG_ON_PAGE(memcg, page); - ret = do_set_pmd(fe, page); + ret = do_set_pmd(vmf, page); if (ret != VM_FAULT_FALLBACK) goto fault_handled; } - if (!fe->pte) { - ret = pte_alloc_one_map(fe); + if (!vmf->pte) { + ret = pte_alloc_one_map(vmf); if (ret) goto fault_handled; } /* Re-check under ptl */ - if (unlikely(!pte_none(*fe->pte))) { + if (unlikely(!pte_none(*vmf->pte))) { ret = VM_FAULT_NOPAGE; goto fault_handled; } @@ -3068,24 +3071,24 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, fe->address, false); + page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); } - set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); /* no need to invalidate: a not-present page won't be cached */ - update_mmu_cache(vma, fe->address, fe->pte); + update_mmu_cache(vma, vmf->address, vmf->pte); ret = 0; fault_handled: /* preallocated pagetable is unused: free it */ - if (fe->prealloc_pte) { - pte_free(fe->vma->vm_mm, fe->prealloc_pte); - fe->prealloc_pte = 0; + if (vmf->prealloc_pte) { + pte_free(vmf->vma->vm_mm, vmf->prealloc_pte); + vmf->prealloc_pte = 0; } return ret; } @@ -3154,17 +3157,17 @@ late_initcall(fault_around_debugfs); * fault_around_pages() value (and therefore to page order). This way it's * easier to guarantee that we don't cross page table boundaries. */ -static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) +static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff) { - unsigned long address = fe->address, nr_pages, mask; + unsigned long address = vmf->address, nr_pages, mask; pgoff_t end_pgoff; int off, ret = 0; nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; - fe->address = max(address & mask, fe->vma->vm_start); - off = ((address - fe->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); + vmf->address = max(address & mask, vmf->vma->vm_start); + off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); start_pgoff -= off; /* @@ -3172,44 +3175,45 @@ static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) * or fault_around_pages() from start_pgoff, depending what is nearest. */ end_pgoff = start_pgoff - - ((fe->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + + ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + PTRS_PER_PTE - 1; - end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1, + end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, start_pgoff + nr_pages - 1); - if (pmd_none(*fe->pmd)) { - fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address); - if (!fe->prealloc_pte) + if (pmd_none(*vmf->pmd)) { + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, + vmf->address); + if (!vmf->prealloc_pte) goto out; smp_wmb(); /* See comment in __pte_alloc() */ } - fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff); + vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); /* Huge page is mapped? Page fault is solved */ - if (pmd_trans_huge(*fe->pmd)) { + if (pmd_trans_huge(*vmf->pmd)) { ret = VM_FAULT_NOPAGE; goto out; } /* ->map_pages() haven't done anything useful. Cold page cache? 
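The do_fault_around() arithmetic converted above is easier to check with concrete numbers. Assuming 4 KiB pages and the default fault_around_bytes of 65536 (16 pages), a fault at 0x12345678 works out as below; this is a worked example, not patch code, and the real code additionally clamps vmf->address to vma->vm_start and end_pgoff to the VMA and page-table boundaries.

    nr_pages     = 65536 >> PAGE_SHIFT;              /* 16 pages       */
    mask         = ~(65536UL - 1) & PAGE_MASK;       /* 64 KiB aligned */
    vmf->address = 0x12345678UL & mask;              /* 0x12340000     */
    off          = (0x12345678UL - vmf->address) >> PAGE_SHIFT;  /* 5  */
    start_pgoff -= off;   /* keep file offsets in step with addresses  */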
*/ - if (!fe->pte) + if (!vmf->pte) goto out; /* check if the page fault is solved */ - fe->pte -= (fe->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); - if (!pte_none(*fe->pte)) + vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); + if (!pte_none(*vmf->pte)) ret = VM_FAULT_NOPAGE; - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); out: - fe->address = address; - fe->pte = NULL; + vmf->address = address; + vmf->pte = NULL; return ret; } -static int do_read_fault(struct fault_env *fe, pgoff_t pgoff) +static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *fault_page; int ret = 0; @@ -3219,27 +3223,27 @@ static int do_read_fault(struct fault_env *fe, pgoff_t pgoff) * something). */ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { - ret = do_fault_around(fe, pgoff); + ret = do_fault_around(vmf, pgoff); if (ret) return ret; } - ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL); + ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - ret |= alloc_set_pte(fe, NULL, fault_page); - if (fe->pte) - pte_unmap_unlock(fe->pte, fe->ptl); + ret |= alloc_set_pte(vmf, NULL, fault_page); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); unlock_page(fault_page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) put_page(fault_page); return ret; } -static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff) +static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *fault_page, *new_page; void *fault_entry; struct mem_cgroup *memcg; @@ -3248,7 +3252,7 @@ static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff) if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address); + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (!new_page) return VM_FAULT_OOM; @@ -3258,17 +3262,17 @@ static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff) return VM_FAULT_OOM; } - ret = __do_fault(fe, pgoff, new_page, &fault_page, &fault_entry); + ret = __do_fault(vmf, pgoff, new_page, &fault_page, &fault_entry); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; if (!(ret & VM_FAULT_DAX_LOCKED)) - copy_user_highpage(new_page, fault_page, fe->address, vma); + copy_user_highpage(new_page, fault_page, vmf->address, vma); __SetPageUptodate(new_page); - ret |= alloc_set_pte(fe, memcg, new_page); - if (fe->pte) - pte_unmap_unlock(fe->pte, fe->ptl); + ret |= alloc_set_pte(vmf, memcg, new_page); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { unlock_page(fault_page); put_page(fault_page); @@ -3284,15 +3288,15 @@ uncharge_out: return ret; } -static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff) +static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *fault_page; struct address_space *mapping; int dirtied = 0; int ret, tmp; - ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL); + ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -3302,7 +3306,7 @@ static int do_shared_fault(struct 
fault_env *fe, pgoff_t pgoff) */ if (vma->vm_ops->page_mkwrite) { unlock_page(fault_page); - tmp = do_page_mkwrite(vma, fault_page, fe->address); + tmp = do_page_mkwrite(vma, fault_page, vmf->address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(fault_page); @@ -3310,9 +3314,9 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff) } } - ret |= alloc_set_pte(fe, NULL, fault_page); - if (fe->pte) - pte_unmap_unlock(fe->pte, fe->ptl); + ret |= alloc_set_pte(vmf, NULL, fault_page); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) { unlock_page(fault_page); @@ -3350,19 +3354,19 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff) * The mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). */ -static int do_fault(struct fault_env *fe) +static int do_fault(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; - pgoff_t pgoff = linear_page_index(vma, fe->address); + struct vm_area_struct *vma = vmf->vma; + pgoff_t pgoff = linear_page_index(vma, vmf->address); /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) return VM_FAULT_SIGBUS; - if (!(fe->flags & FAULT_FLAG_WRITE)) - return do_read_fault(fe, pgoff); + if (!(vmf->flags & FAULT_FLAG_WRITE)) + return do_read_fault(vmf, pgoff); if (!(vma->vm_flags & VM_SHARED)) - return do_cow_fault(fe, pgoff); - return do_shared_fault(fe, pgoff); + return do_cow_fault(vmf, pgoff); + return do_shared_fault(vmf, pgoff); } static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, @@ -3380,9 +3384,9 @@ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, return mpol_misplaced(page, vma, addr); } -static int do_numa_page(struct fault_env *fe, pte_t pte) +static int do_numa_page(struct vm_fault *vmf, pte_t pte) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; struct page *page = NULL; int page_nid = -1; int last_cpupid; @@ -3400,10 +3404,10 @@ static int do_numa_page(struct fault_env *fe, pte_t pte) * page table entry is not accessible, so there would be no * concurrent hardware modifications to the PTE. 
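For orientation, the NUMA hinting fault converted here runs in two steps (condensed from the hunk below): first restore access to the PROT_NONE-protected PTE, then ask the placement policy whether the page should migrate.

    /* do_numa_page(), condensed: */
    pte = pte_mkyoung(pte);
    if (was_writable)
        pte = pte_mkwrite(pte);
    set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
    update_mmu_cache(vma, vmf->address, vmf->pte);
    target_nid = numa_migrate_prep(page, vma, vmf->address,
                                   page_nid, &flags);
    /* target_nid == -1 means stay put; otherwise try migration. */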
*/ - fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd); - spin_lock(fe->ptl); - if (unlikely(!pte_same(*fe->pte, pte))) { - pte_unmap_unlock(fe->pte, fe->ptl); + vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + if (unlikely(!pte_same(*vmf->pte, pte))) { + pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } @@ -3412,18 +3416,18 @@ static int do_numa_page(struct fault_env *fe, pte_t pte) pte = pte_mkyoung(pte); if (was_writable) pte = pte_mkwrite(pte); - set_pte_at(vma->vm_mm, fe->address, fe->pte, pte); - update_mmu_cache(vma, fe->address, fe->pte); + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); + update_mmu_cache(vma, vmf->address, vmf->pte); - page = vm_normal_page(vma, fe->address, pte); + page = vm_normal_page(vma, vmf->address, pte); if (!page) { - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } /* TODO: handle PTE-mapped THP */ if (PageCompound(page)) { - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } @@ -3447,9 +3451,9 @@ static int do_numa_page(struct fault_env *fe, pte_t pte) last_cpupid = page_cpupid_last(page); page_nid = page_to_nid(page); - target_nid = numa_migrate_prep(page, vma, fe->address, page_nid, + target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, &flags); - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); if (target_nid == -1) { put_page(page); goto out; @@ -3469,28 +3473,28 @@ out: return 0; } -static int create_huge_pmd(struct fault_env *fe) +static int create_huge_pmd(struct vm_fault *vmf) { - struct vm_area_struct *vma = fe->vma; + struct vm_area_struct *vma = vmf->vma; if (vma_is_anonymous(vma)) - return do_huge_pmd_anonymous_page(fe); + return do_huge_pmd_anonymous_page(vmf); if (vma->vm_ops->pmd_fault) - return vma->vm_ops->pmd_fault(vma, fe->address, fe->pmd, - fe->flags); + return vma->vm_ops->pmd_fault(vma, vmf->address, vmf->pmd, + vmf->flags); return VM_FAULT_FALLBACK; } -static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd) +static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) { - if (vma_is_anonymous(fe->vma)) - return do_huge_pmd_wp_page(fe, orig_pmd); - if (fe->vma->vm_ops->pmd_fault) - return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address, fe->pmd, - fe->flags); + if (vma_is_anonymous(vmf->vma)) + return do_huge_pmd_wp_page(vmf, orig_pmd); + if (vmf->vma->vm_ops->pmd_fault) + return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf->address, + vmf->pmd, vmf->flags); /* COW handled on pte level: split pmd */ - VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma); - __split_huge_pmd(fe->vma, fe->pmd, fe->address, false, NULL); + VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); + __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; } @@ -3515,21 +3519,21 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) * The mmap_sem may have been released depending on flags and our return value. * See filemap_fault() and __lock_page_or_retry(). */ -static int handle_pte_fault(struct fault_env *fe) +static int handle_pte_fault(struct vm_fault *vmf) { pte_t entry; - if (unlikely(pmd_none(*fe->pmd))) { + if (unlikely(pmd_none(*vmf->pmd))) { /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table * for an instant, it will be difficult to retract from * concurrent faults and from rmap lookups. 
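The PMD-level write-fault dispatch converted just above has three outcomes (a sketch annotated from the wp_huge_pmd() hunk):

    if (vma_is_anonymous(vmf->vma))          /* anonymous THP: COW */
        return do_huge_pmd_wp_page(vmf, orig_pmd);
    if (vmf->vma->vm_ops->pmd_fault)         /* driver-backed huge page */
        return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf->address,
                                           vmf->pmd, vmf->flags);
    /* File THP: split the PMD and fall back to PTE-level COW. */
    __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
    return VM_FAULT_FALLBACK;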
*/ - fe->pte = NULL; + vmf->pte = NULL; } else { /* See comment in pte_alloc_one_map() */ - if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd)) + if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) return 0; /* * A regular pmd is established and it can't morph into a huge @@ -3537,9 +3541,9 @@ static int handle_pte_fault(struct fault_env *fe) * mmap_sem read mode and khugepaged takes it in write mode. * So now it's safe to run pte_offset_map(). */ - fe->pte = pte_offset_map(fe->pmd, fe->address); + vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - entry = *fe->pte; + entry = *vmf->pte; /* * some architectures can have larger ptes than wordsize, @@ -3551,37 +3555,37 @@ static int handle_pte_fault(struct fault_env *fe) */ barrier(); if (pte_none(entry)) { - pte_unmap(fe->pte); - fe->pte = NULL; + pte_unmap(vmf->pte); + vmf->pte = NULL; } } - if (!fe->pte) { - if (vma_is_anonymous(fe->vma)) - return do_anonymous_page(fe); + if (!vmf->pte) { + if (vma_is_anonymous(vmf->vma)) + return do_anonymous_page(vmf); else - return do_fault(fe); + return do_fault(vmf); } if (!pte_present(entry)) - return do_swap_page(fe, entry); + return do_swap_page(vmf, entry); - if (pte_protnone(entry) && vma_is_accessible(fe->vma)) - return do_numa_page(fe, entry); + if (pte_protnone(entry) && vma_is_accessible(vmf->vma)) + return do_numa_page(vmf, entry); - fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd); - spin_lock(fe->ptl); - if (unlikely(!pte_same(*fe->pte, entry))) + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + if (unlikely(!pte_same(*vmf->pte, entry))) goto unlock; - if (fe->flags & FAULT_FLAG_WRITE) { + if (vmf->flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) - return do_wp_page(fe, entry); + return do_wp_page(vmf, entry); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); - if (ptep_set_access_flags(fe->vma, fe->address, fe->pte, entry, - fe->flags & FAULT_FLAG_WRITE)) { - update_mmu_cache(fe->vma, fe->address, fe->pte); + if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, + vmf->flags & FAULT_FLAG_WRITE)) { + update_mmu_cache(vmf->vma, vmf->address, vmf->pte); } else { /* * This is needed only for protection faults but the arch code @@ -3589,11 +3593,11 @@ static int handle_pte_fault(struct fault_env *fe) * This still avoids useless tlb flushes for .text page faults * with threads. 
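Putting the converted helpers together, handle_pte_fault() dispatches in a fixed order; the summary below is assembled from the hunks above, not new logic:

    /* handle_pte_fault() decision order:
     *   no PTE, anonymous VMA      -> do_anonymous_page(vmf)
     *   no PTE, file-backed VMA    -> do_fault(vmf)
     *   !pte_present(entry)        -> do_swap_page(vmf, entry)
     *   pte_protnone(), accessible -> do_numa_page(vmf, entry)
     *   write fault, !pte_write()  -> do_wp_page(vmf, entry)
     *   otherwise                  -> pte_mkyoung()/pte_mkdirty() and
     *                                 update_mmu_cache()
     */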
*/ - if (fe->flags & FAULT_FLAG_WRITE) - flush_tlb_fix_spurious_fault(fe->vma, fe->address); + if (vmf->flags & FAULT_FLAG_WRITE) + flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); } unlock: - pte_unmap_unlock(fe->pte, fe->ptl); + pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } @@ -3606,7 +3610,7 @@ unlock: static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { - struct fault_env fe = { + struct vm_fault vmf = { .vma = vma, .address = address, .flags = flags, @@ -3619,35 +3623,35 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, pud = pud_alloc(mm, pgd, address); if (!pud) return VM_FAULT_OOM; - fe.pmd = pmd_alloc(mm, pud, address); - if (!fe.pmd) + vmf.pmd = pmd_alloc(mm, pud, address); + if (!vmf.pmd) return VM_FAULT_OOM; - if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) { - int ret = create_huge_pmd(&fe); + if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { + int ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - pmd_t orig_pmd = *fe.pmd; + pmd_t orig_pmd = *vmf.pmd; int ret; barrier(); if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) - return do_huge_pmd_numa_page(&fe, orig_pmd); + return do_huge_pmd_numa_page(&vmf, orig_pmd); - if ((fe.flags & FAULT_FLAG_WRITE) && + if ((vmf.flags & FAULT_FLAG_WRITE) && !pmd_write(orig_pmd)) { - ret = wp_huge_pmd(&fe, orig_pmd); + ret = wp_huge_pmd(&vmf, orig_pmd); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - huge_pmd_set_accessed(&fe, orig_pmd); + huge_pmd_set_accessed(&vmf, orig_pmd); return 0; } } } - return handle_pte_fault(&fe); + return handle_pte_fault(&vmf); } /* diff --git a/mm/nommu.c b/mm/nommu.c index c299a7fbca70..9902d811ff4f 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1801,7 +1801,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } EXPORT_SYMBOL(filemap_fault); -void filemap_map_pages(struct fault_env *fe, +void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) { BUG(); } -- cgit v1.2.3 From 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:01 -0800 Subject: mm: use vmf->address instead of vmf->virtual_address Every single user of vmf->virtual_address typed that entry to unsigned long before doing anything with it so the type of virtual_address does not really provide us any additional safety. Just use masked vmf->address which already has the appropriate type. Link: http://lkml.kernel.org/r/1479460644-25076-3-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Acked-by: Kirill A.
Shutemov Cc: Dan Williams Cc: Ross Zwisler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/platforms/cell/spufs/file.c | 8 +++----- arch/x86/entry/vdso/vma.c | 4 ++-- drivers/char/agp/alpha-agp.c | 3 +-- drivers/char/mspec.c | 2 +- drivers/dax/dax.c | 3 +-- drivers/gpu/drm/armada/armada_gem.c | 5 ++--- drivers/gpu/drm/drm_vm.c | 10 +++++----- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7 +++---- drivers/gpu/drm/exynos/exynos_drm_gem.c | 6 ++---- drivers/gpu/drm/gma500/framebuffer.c | 2 +- drivers/gpu/drm/gma500/gem.c | 5 ++--- drivers/gpu/drm/i915/i915_gem.c | 3 +-- drivers/gpu/drm/msm/msm_gem.c | 8 +++----- drivers/gpu/drm/omapdrm/omap_gem.c | 20 ++++++++------------ drivers/gpu/drm/tegra/gem.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- drivers/gpu/drm/udl/udl_gem.c | 5 ++--- drivers/gpu/drm/vgem/vgem_drv.c | 2 +- drivers/media/v4l2-core/videobuf-dma-sg.c | 5 ++--- drivers/misc/cxl/context.c | 5 ++--- drivers/misc/sgi-gru/grumain.c | 2 +- drivers/staging/android/ion/ion.c | 2 +- drivers/staging/lustre/lustre/llite/vvp_io.c | 6 +++--- drivers/xen/privcmd.c | 2 +- fs/dax.c | 4 ++-- include/linux/mm.h | 2 -- mm/memory.c | 9 ++++----- 27 files changed, 57 insertions(+), 79 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 06254467e4dd..3a147122bc98 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -236,7 +236,6 @@ static int spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct spu_context *ctx = vma->vm_file->private_data; - unsigned long address = (unsigned long)vmf->virtual_address; unsigned long pfn, offset; offset = vmf->pgoff << PAGE_SHIFT; @@ -244,7 +243,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n", - address, offset); + vmf->address, offset); if (spu_acquire(ctx)) return VM_FAULT_NOPAGE; @@ -256,7 +255,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; } - vm_insert_pfn(vma, address, pfn); + vm_insert_pfn(vma, vmf->address, pfn); spu_release(ctx); @@ -355,8 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma, down_read(&current->mm->mmap_sem); } else { area = ctx->spu->problem_phys + ps_offs; - vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, - (area + offset) >> PAGE_SHIFT); + vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); } diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index e739002427ed..40121d14d34d 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -109,7 +109,7 @@ static int vvar_fault(const struct vm_special_mapping *sm, return VM_FAULT_SIGBUS; if (sym_offset == image->sym_vvar_page) { - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, + ret = vm_insert_pfn(vma, vmf->address, __pa_symbol(&__vvar_page) >> PAGE_SHIFT); } else if (sym_offset == image->sym_pvclock_page) { struct pvclock_vsyscall_time_info *pvti = @@ -117,7 +117,7 @@ static int vvar_fault(const struct vm_special_mapping *sm, if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) { ret = vm_insert_pfn( vma, - (unsigned long)vmf->virtual_address, + vmf->address, __pa(pvti) >> PAGE_SHIFT); } } diff --git a/drivers/char/agp/alpha-agp.c
b/drivers/char/agp/alpha-agp.c index 199b8e99f7d7..737187865269 100644 --- a/drivers/char/agp/alpha-agp.c +++ b/drivers/char/agp/alpha-agp.c @@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma, unsigned long pa; struct page *page; - dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start - + agp->aperture.bus_base; + dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base; pa = agp->ops->translate(agp, dma_addr); if (pa == (unsigned long)-EINVAL) diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index f3f92d5fcda0..a697ca0cab1e 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * be because another thread has installed the pte first, so it * is no problem. */ - vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + vm_insert_pfn(vma, vmf->address, pfn); return VM_FAULT_NOPAGE; } diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 286447a83dab..26ec39ddf21f 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -328,7 +328,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff, static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, struct vm_fault *vmf) { - unsigned long vaddr = (unsigned long) vmf->virtual_address; struct device *dev = &dax_dev->dev; struct dax_region *dax_region; int rc = VM_FAULT_SIGBUS; @@ -353,7 +352,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma, pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); - rc = vm_insert_mixed(vma, vaddr, pfn); + rc = vm_insert_mixed(vma, vmf->address, pfn); if (rc == -ENOMEM) return VM_FAULT_OOM; diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 768087ddb046..a293c8be232c 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -17,12 +17,11 @@ static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data); - unsigned long addr = (unsigned long)vmf->virtual_address; unsigned long pfn = obj->phys_addr >> PAGE_SHIFT; int ret; - pfn += (addr - vma->vm_start) >> PAGE_SHIFT; - ret = vm_insert_pfn(vma, addr, pfn); + pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT; + ret = vm_insert_pfn(vma, vmf->address, pfn); switch (ret) { case 0: diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index caa4e4ca616d..bd311c77c254 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * Using vm_pgoff as a selector forces us to use this unusual * addressing scheme. 
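Every driver hunk in this patch is an instance of one substitution; the pattern, shown once (a sketch, not a specific hunk): vmf->address is already an unsigned long and already masked by PAGE_MASK in the core fault path, so the cast-and-mask boilerplate around vmf->virtual_address simply disappears.

    /* before */
    unsigned long addr = (unsigned long)vmf->virtual_address;
    pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;

    /* after */
    pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;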
*/ - resource_size_t offset = (unsigned long)vmf->virtual_address - - vma->vm_start; + resource_size_t offset = vmf->address - vma->vm_start; resource_size_t baddr = map->offset + offset; struct drm_agp_mem *agpmem; struct page *page; @@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!map) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; + offset = vmf->address - vma->vm_start; i = (unsigned long)map->handle + offset; page = vmalloc_to_page((void *)i); if (!page) @@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!dma->pagelist) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ + offset = vmf->address - vma->vm_start; + /* vm_[pg]off[set] should be 0 */ page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ page = virt_to_page((void *)dma->pagelist[page_nr]); @@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!entry->pagelist) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; + offset = vmf->address - vma->vm_start; map_offset = map->offset - (unsigned long)dev->sg->virtual; page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page = entry->pagelist[page_offset]; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 6f5dfabb3096..114dddbd297b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page = pages[pgoff]; - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + ret = vm_insert_page(vma, vmf->address, page); out: switch (ret) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index ea7a18230888..57b81460fec8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) pgoff_t page_offset; int ret; - page_offset = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { DRM_ERROR("invalid page offset\n"); @@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } pfn = page_to_pfn(exynos_gem->pages[page_offset]); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); out: switch (ret) { diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 4071b2d1e8cf..8b44fa542562 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -125,7 +125,7 @@ static int psbfb_vm_fault(struct 
vm_area_struct *vma, struct vm_fault *vmf) psbfb->gtt->offset; page_num = vma_pages(vma); - address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); + address = vmf->address - (vmf->pgoff << PAGE_SHIFT); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 6d1cb6b370b1..527c62917660 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Page relative to the VMA start - we must calculate this ourselves because vmf->pgoff is the fake GEM offset */ - page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start) - >> PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* CPU view of the page, don't go via the GART for CPU writes */ if (r->stolen) pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; else pfn = page_to_pfn(r->pages[page_offset]); - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_pfn(vma, vmf->address, pfn); fail: mutex_unlock(&dev_priv->mmap_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d0dcaf35b429..412f3513f269 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1796,8 +1796,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) int ret; /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >> - PAGE_SHIFT; + page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; trace_i915_gem_object_fault(obj, page_offset, true, write); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index cd06cfd94687..d8bc59c7e261 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; pfn = page_to_pfn(pages[pgoff]); - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); out_unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d4e1e11466f8..4a90c690f09e 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -398,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj, pgoff_t pgoff; /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (omap_obj->pages) { omap_gem_cpu_sync(obj, pgoff); @@ -409,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj, pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; } - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + return vm_insert_mixed(vma, 
vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); } /* Special handling for the case of faulting in 2d tiled buffers */ @@ -427,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj, struct page *pages[64]; /* XXX is this too much to have on stack? */ unsigned long pfn; pgoff_t pgoff, base_pgoff; - void __user *vaddr; + unsigned long vaddr; int i, ret, slots; /* @@ -447,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj, const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* * Actual address we start mapping at is rounded down to previous slot @@ -459,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj, /* figure out buffer width in slots */ slots = omap_obj->width >> priv->usergart[fmt].slot_shift; - vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); + vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT); entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last]; @@ -503,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj, pfn = entry->paddr >> PAGE_SHIFT; - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); for (i = n; i > 0; i--) { - vm_insert_mixed(vma, (unsigned long)vaddr, - __pfn_to_pfn_t(pfn, PFN_DEV)); + vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV)); pfn += priv->usergart[fmt].stride_pfn; vaddr += PAGE_SIZE * m; } diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index c08e5279eeac..7d853e6b5ff0 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -452,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!bo->pages) return VM_FAULT_SIGBUS; - offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; + offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page = bo->pages[offset]; - err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + err = vm_insert_page(vma, vmf->address, page); switch (err) { case -EAGAIN: case 0: diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4748aedc933a..68ef993ab431 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page; int ret; int i; - unsigned long address = (unsigned long)vmf->virtual_address; + unsigned long address = vmf->address; int retval = VM_FAULT_NOPAGE; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 818e70712b18..3c0c4bd3f750 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned int page_offset; int ret = 0; - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> - PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (!obj->pages) return VM_FAULT_SIGBUS; page = obj->pages[page_offset]; - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + ret = vm_insert_page(vma, vmf->address, page); switch (ret) { case -EAGAIN: case 0: diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 
f36c14729b55..477e07f0ecb6 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_vgem_gem_object *obj = vma->vm_private_data; /* We don't use vmf->pgoff since that has the fake offset */ - unsigned long vaddr = (unsigned long)vmf->virtual_address; + unsigned long vaddr = vmf->address; struct page *page; page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping, diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 1db0af6c7f94..ba63ca57ed7e 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page; dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", - (unsigned long)vmf->virtual_address, - vma->vm_start, vma->vm_end); + vmf->address, vma->vm_start, vma->vm_end); page = alloc_page(GFP_USER | __GFP_DMA32); if (!page) return VM_FAULT_OOM; - clear_user_highpage(page, (unsigned long)vmf->virtual_address); + clear_user_highpage(page, vmf->address); vmf->page = page; return 0; diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 5e506c19108a..5d36dcc7f47e 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -117,13 +117,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct cxl_context *ctx = vma->vm_file->private_data; - unsigned long address = (unsigned long)vmf->virtual_address; u64 area, offset; offset = vmf->pgoff << PAGE_SHIFT; pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", - __func__, ctx->pe, address, offset); + __func__, ctx->pe, vmf->address, offset); if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { area = ctx->afu->psn_phys; @@ -155,7 +154,7 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); + vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); mutex_unlock(&ctx->status_mutex); diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 33741ad4a74a..af2e077da4b8 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -932,7 +932,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned long paddr, vaddr; unsigned long expires; - vaddr = (unsigned long)vmf->virtual_address; + vaddr = vmf->address; gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", vma, vaddr, GSEG_BASE(vaddr)); STAT(nopfn); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index d5cc3070e83f..b653451843c8 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_pfn(vma, vmf->address, pfn); mutex_unlock(&buffer->lock); if (ret) return VM_FAULT_ERROR; diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 0b6d388d8aa4..697cbfbe9374 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c 
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n", vmf->page, vmf->page->mapping, vmf->page->index, (long)vmf->page->flags, page_count(vmf->page), - page_private(vmf->page), vmf->virtual_address); + page_private(vmf->page), (void *)vmf->address); if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) { lock_page(vmf->page); cfio->ft_flags |= VM_FAULT_LOCKED; @@ -1025,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) } if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { - CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address); return -EFAULT; } if (cfio->ft_flags & VM_FAULT_OOM) { - CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address); + CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address); return -ENOMEM; } diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 702040fe2001..6e3306f4a525 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -602,7 +602,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", vma, vma->vm_start, vma->vm_end, - vmf->pgoff, vmf->virtual_address); + vmf->pgoff, (void *)vmf->address); return VM_FAULT_SIGBUS; } diff --git a/fs/dax.c b/fs/dax.c index 5ae8e11ad786..f1dfae64b11a 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -738,7 +738,7 @@ static int dax_insert_mapping(struct address_space *mapping, struct block_device *bdev, sector_t sector, size_t size, void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf) { - unsigned long vaddr = (unsigned long)vmf->virtual_address; + unsigned long vaddr = vmf->address; struct blk_dax_ctl dax = { .sector = sector, .size = size, @@ -948,7 +948,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; - unsigned long vaddr = (unsigned long)vmf->virtual_address; + unsigned long vaddr = vmf->address; loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; sector_t sector; struct iomap iomap = { 0 }; diff --git a/include/linux/mm.h b/include/linux/mm.h index de5bcead2511..75fda0de64bf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -297,8 +297,6 @@ struct vm_fault { gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ - void __user *virtual_address; /* Faulting virtual address masked by - * PAGE_MASK */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ diff --git a/mm/memory.c b/mm/memory.c index 512e1c359193..379836261d4a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2040,7 +2040,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, struct vm_fault vmf; int ret; - vmf.virtual_address = (void __user *)(address & PAGE_MASK); + vmf.address = address & PAGE_MASK; vmf.pgoff = page->index; vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; vmf.gfp_mask = __get_fault_gfp_mask(vma); @@ -2276,8 +2276,7 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) struct vm_fault vmf2 = { .page = NULL, .pgoff = linear_page_index(vma, vmf->address), - .virtual_address = - (void __user *)(vmf->address & PAGE_MASK), + .address = vmf->address, .flags = FAULT_FLAG_WRITE | 
FAULT_FLAG_MKWRITE, }; int ret; @@ -2852,7 +2851,7 @@ static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff, struct vm_fault vmf2; int ret; - vmf2.virtual_address = (void __user *)(vmf->address & PAGE_MASK); + vmf2.address = vmf->address; vmf2.pgoff = pgoff; vmf2.flags = vmf->flags; vmf2.page = NULL; @@ -3612,7 +3611,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, { struct vm_fault vmf = { .vma = vma, - .address = address, + .address = address & PAGE_MASK, .flags = flags, }; struct mm_struct *mm = vma->vm_mm; -- cgit v1.2.3 From 0721ec8bc156fafc9057ec1df95cdb3bbc3cbae8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:04 -0800 Subject: mm: use pgoff in struct vm_fault instead of passing it separately struct vm_fault already has a pgoff entry. Use it instead of passing pgoff as a separate argument and then assigning it later. Link: http://lkml.kernel.org/r/1479460644-25076-4-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/khugepaged.c | 1 + mm/memory.c | 35 ++++++++++++++++++----------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d950c2509161..f062d54c9683 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -882,6 +882,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, .address = address, .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, + .pgoff = linear_page_index(vma, address), }; /* we only decide to swapin, if there is enough young ptes */ diff --git a/mm/memory.c b/mm/memory.c index 379836261d4a..c514b4a07a7a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2275,7 +2275,7 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { struct vm_fault vmf2 = { .page = NULL, - .pgoff = linear_page_index(vma, vmf->address), + .pgoff = vmf->pgoff, .address = vmf->address, .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, }; @@ -2844,15 +2844,15 @@ oom: * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry(). */ -static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff, - struct page *cow_page, struct page **page, void **entry) +static int __do_fault(struct vm_fault *vmf, struct page *cow_page, + struct page **page, void **entry) { struct vm_area_struct *vma = vmf->vma; struct vm_fault vmf2; int ret; vmf2.address = vmf->address; - vmf2.pgoff = pgoff; + vmf2.pgoff = vmf->pgoff; vmf2.flags = vmf->flags; vmf2.page = NULL; vmf2.gfp_mask = __get_fault_gfp_mask(vma); @@ -3156,9 +3156,10 @@ late_initcall(fault_around_debugfs); * fault_around_pages() value (and therefore to page order). This way it's * easier to guarantee that we don't cross page table boundaries. */ -static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff) +static int do_fault_around(struct vm_fault *vmf) { unsigned long address = vmf->address, nr_pages, mask; + pgoff_t start_pgoff = vmf->pgoff; pgoff_t end_pgoff; int off, ret = 0; @@ -3210,7 +3211,7 @@ out: return ret; } -static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff) +static int do_read_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *fault_page; @@ -3222,12 +3223,12 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff) * something).
*/ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { - ret = do_fault_around(vmf, pgoff); + ret = do_fault_around(vmf); if (ret) return ret; } - ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL); + ret = __do_fault(vmf, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -3240,7 +3241,7 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff) return ret; } -static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff) +static int do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *fault_page, *new_page; @@ -3261,7 +3262,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff) return VM_FAULT_OOM; } - ret = __do_fault(vmf, pgoff, new_page, &fault_page, &fault_entry); + ret = __do_fault(vmf, new_page, &fault_page, &fault_entry); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; @@ -3276,7 +3277,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff) unlock_page(fault_page); put_page(fault_page); } else { - dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff); + dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff); } if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; @@ -3287,7 +3288,7 @@ uncharge_out: return ret; } -static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff) +static int do_shared_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *fault_page; @@ -3295,7 +3296,7 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff) int dirtied = 0; int ret, tmp; - ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL); + ret = __do_fault(vmf, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -3356,16 +3357,15 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff) static int do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - pgoff_t pgoff = linear_page_index(vma, vmf->address); /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) return VM_FAULT_SIGBUS; if (!(vmf->flags & FAULT_FLAG_WRITE)) - return do_read_fault(vmf, pgoff); + return do_read_fault(vmf); if (!(vma->vm_flags & VM_SHARED)) - return do_cow_fault(vmf, pgoff); - return do_shared_fault(vmf, pgoff); + return do_cow_fault(vmf); + return do_shared_fault(vmf); } static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, @@ -3613,6 +3613,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, .vma = vma, .address = address & PAGE_MASK, .flags = flags, + .pgoff = linear_page_index(vma, address), }; struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; -- cgit v1.2.3 From 667240e0f2e13e792a5af99b3c34dfab12ef125b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:07 -0800 Subject: mm: use passed vm_fault structure in __do_fault() Instead of creating another vm_fault structure, use the one passed to __do_fault() for passing arguments into the fault handler. Link: http://lkml.kernel.org/r/1479460644-25076-5-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A.
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index c514b4a07a7a..cbc6d47fda73 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2848,37 +2848,31 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page, struct page **page, void **entry) { struct vm_area_struct *vma = vmf->vma; - struct vm_fault vmf2; int ret; - vmf2.address = vmf->address; - vmf2.pgoff = vmf->pgoff; - vmf2.flags = vmf->flags; - vmf2.page = NULL; - vmf2.gfp_mask = __get_fault_gfp_mask(vma); - vmf2.cow_page = cow_page; + vmf->cow_page = cow_page; - ret = vma->vm_ops->fault(vma, &vmf2); + ret = vma->vm_ops->fault(vma, vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; if (ret & VM_FAULT_DAX_LOCKED) { - *entry = vmf2.entry; + *entry = vmf->entry; return ret; } - if (unlikely(PageHWPoison(vmf2.page))) { + if (unlikely(PageHWPoison(vmf->page))) { if (ret & VM_FAULT_LOCKED) - unlock_page(vmf2.page); - put_page(vmf2.page); + unlock_page(vmf->page); + put_page(vmf->page); return VM_FAULT_HWPOISON; } if (unlikely(!(ret & VM_FAULT_LOCKED))) - lock_page(vmf2.page); + lock_page(vmf->page); else - VM_BUG_ON_PAGE(!PageLocked(vmf2.page), vmf2.page); + VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); - *page = vmf2.page; + *page = vmf->page; return ret; } @@ -3614,6 +3608,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, .address = address & PAGE_MASK, .flags = flags, .pgoff = linear_page_index(vma, address), + .gfp_mask = __get_fault_gfp_mask(vma), }; struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; -- cgit v1.2.3 From 936ca80d3773bd9b6dda8a0dfd54425f9ec1be9d Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:10 -0800 Subject: mm: trim __do_fault() arguments Use the vm_fault structure to pass cow_page, page, and entry in and out of the function. That reduces the number of __do_fault() arguments from 4 to 1. Link: http://lkml.kernel.org/r/1479460644-25076-6-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 67 ++++++++++++++++++++++++++----------------------------------- 1 file changed, 29 insertions(+), 38 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index cbc6d47fda73..78b81e8984df 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2844,26 +2844,22 @@ oom: * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry().
*/ -static int __do_fault(struct vm_fault *vmf, struct page *cow_page, - struct page **page, void **entry) +static int __do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; int ret; - vmf->cow_page = cow_page; - ret = vma->vm_ops->fault(vma, vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - if (ret & VM_FAULT_DAX_LOCKED) { - *entry = vmf->entry; + if (ret & VM_FAULT_DAX_LOCKED) return ret; - } if (unlikely(PageHWPoison(vmf->page))) { if (ret & VM_FAULT_LOCKED) unlock_page(vmf->page); put_page(vmf->page); + vmf->page = NULL; return VM_FAULT_HWPOISON; } @@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page, else VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); - *page = vmf->page; return ret; } @@ -3208,7 +3203,6 @@ out: static int do_read_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct page *fault_page; int ret = 0; /* @@ -3222,54 +3216,52 @@ static int do_read_fault(struct vm_fault *vmf) return ret; } - ret = __do_fault(vmf, NULL, &fault_page, NULL); + ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - ret |= alloc_set_pte(vmf, NULL, fault_page); + ret |= alloc_set_pte(vmf, NULL, vmf->page); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); - unlock_page(fault_page); + unlock_page(vmf->page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) - put_page(fault_page); + put_page(vmf->page); return ret; } static int do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct page *fault_page, *new_page; - void *fault_entry; struct mem_cgroup *memcg; int ret; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); - if (!new_page) + vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); + if (!vmf->cow_page) return VM_FAULT_OOM; - if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, + if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, &memcg, false)) { - put_page(new_page); + put_page(vmf->cow_page); return VM_FAULT_OOM; } - ret = __do_fault(vmf, new_page, &fault_page, &fault_entry); + ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; if (!(ret & VM_FAULT_DAX_LOCKED)) - copy_user_highpage(new_page, fault_page, vmf->address, vma); - __SetPageUptodate(new_page); + copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); + __SetPageUptodate(vmf->cow_page); - ret |= alloc_set_pte(vmf, memcg, new_page); + ret |= alloc_set_pte(vmf, memcg, vmf->cow_page); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { - unlock_page(fault_page); - put_page(fault_page); + unlock_page(vmf->page); + put_page(vmf->page); } else { dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff); } @@ -3277,20 +3269,19 @@ static int do_cow_fault(struct vm_fault *vmf) goto uncharge_out; return ret; uncharge_out: - mem_cgroup_cancel_charge(new_page, memcg, false); - put_page(new_page); + mem_cgroup_cancel_charge(vmf->cow_page, memcg, false); + put_page(vmf->cow_page); return ret; } static int do_shared_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct page *fault_page; struct address_space *mapping; int dirtied = 0; int ret, tmp; - ret = __do_fault(vmf, NULL, &fault_page, NULL); + ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | 
VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -3299,26 +3290,26 @@ static int do_shared_fault(struct vm_fault *vmf) * about to become writable */ if (vma->vm_ops->page_mkwrite) { - unlock_page(fault_page); - tmp = do_page_mkwrite(vma, fault_page, vmf->address); + unlock_page(vmf->page); + tmp = do_page_mkwrite(vma, vmf->page, vmf->address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { - put_page(fault_page); + put_page(vmf->page); return tmp; } } - ret |= alloc_set_pte(vmf, NULL, fault_page); + ret |= alloc_set_pte(vmf, NULL, vmf->page); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) { - unlock_page(fault_page); - put_page(fault_page); + unlock_page(vmf->page); + put_page(vmf->page); return ret; } - if (set_page_dirty(fault_page)) + if (set_page_dirty(vmf->page)) dirtied = 1; /* * Take a local copy of the address_space - page.mapping may be zeroed @@ -3326,8 +3317,8 @@ static int do_shared_fault(struct vm_fault *vmf) * pinned by vma->vm_file's reference. We rely on unlock_page()'s * release semantics to prevent the compiler from undoing this copying. */ - mapping = page_rmapping(fault_page); - unlock_page(fault_page); + mapping = page_rmapping(vmf->page); + unlock_page(vmf->page); if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { /* * Some device drivers do not set page.mapping but still -- cgit v1.2.3 From fe82221f57ea6840a4238a8e077e3f93f257a03f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:13 -0800 Subject: mm: use passed vm_fault structure in wp_pfn_shared() Instead of creating another vm_fault structure, use the one passed to wp_pfn_shared() for passing arguments into the pfn_mkwrite handler. Link: http://lkml.kernel.org/r/1479460644-25076-7-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 78b81e8984df..7ba9cc58dddd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2273,16 +2273,11 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) struct vm_area_struct *vma = vmf->vma; if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { - struct vm_fault vmf2 = { - .page = NULL, - .pgoff = vmf->pgoff, - .address = vmf->address, - .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, - }; int ret; pte_unmap_unlock(vmf->pte, vmf->ptl); - ret = vma->vm_ops->pfn_mkwrite(vma, &vmf2); + vmf->flags |= FAULT_FLAG_MKWRITE; + ret = vma->vm_ops->pfn_mkwrite(vma, vmf); if (ret & VM_FAULT_ERROR) return ret; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, -- cgit v1.2.3 From 2994302bc8a17180788fac66a47102d338d5d0ec Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:16 -0800 Subject: mm: add orig_pte field into vm_fault Add an orig_pte field to the vm_fault structure to allow ->page_mkwrite handlers to fully handle the fault. This also allows us to save some passing of extra arguments around. Link: http://lkml.kernel.org/r/1479460644-25076-8-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A.
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 +-- mm/internal.h | 2 +- mm/khugepaged.c | 7 ++--- mm/memory.c | 82 +++++++++++++++++++++++++++--------------------------- 4 files changed, 47 insertions(+), 48 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 75fda0de64bf..39c17a2efcea 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -298,8 +298,8 @@ struct vm_fault { pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ pmd_t *pmd; /* Pointer to pmd entry matching - * the 'address' - */ + * the 'address' */ + pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Handler may choose to COW */ struct page *page; /* ->fault handlers should return a diff --git a/mm/internal.h b/mm/internal.h index 093b1eacc91b..44d68895a9b9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,7 +36,7 @@ /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) -int do_swap_page(struct vm_fault *vmf, pte_t orig_pte); +int do_swap_page(struct vm_fault *vmf); void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f062d54c9683..7434a63cac94 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -875,7 +875,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int referenced) { - pte_t pteval; int swapped_in = 0, ret = 0; struct vm_fault vmf = { .vma = vma, @@ -893,11 +892,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, vmf.pte = pte_offset_map(pmd, address); for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE; vmf.pte++, vmf.address += PAGE_SIZE) { - pteval = *vmf.pte; - if (!is_swap_pte(pteval)) + vmf.orig_pte = *vmf.pte; + if (!is_swap_pte(vmf.orig_pte)) continue; swapped_in++; - ret = do_swap_page(&vmf, pteval); + ret = do_swap_page(&vmf); /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { diff --git a/mm/memory.c b/mm/memory.c index 7ba9cc58dddd..cf74f7ca911b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2070,8 +2070,8 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte, - struct page *page, int page_mkwrite, int dirty_shared) +static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page, + int page_mkwrite, int dirty_shared) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; @@ -2084,8 +2084,8 @@ static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte, if (page) page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); - flush_cache_page(vma, vmf->address, pte_pfn(orig_pte)); - entry = pte_mkyoung(orig_pte); + flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); + entry = pte_mkyoung(vmf->orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); @@ -2135,8 +2135,7 @@ static inline int wp_page_reuse(struct vm_fault *vmf, pte_t orig_pte, * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. 
*/ -static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte, - struct page *old_page) +static int wp_page_copy(struct vm_fault *vmf, struct page *old_page) { struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; @@ -2150,7 +2149,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte, if (unlikely(anon_vma_prepare(vma))) goto oom; - if (is_zero_pfn(pte_pfn(orig_pte))) { + if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { new_page = alloc_zeroed_user_highpage_movable(vma, vmf->address); if (!new_page) @@ -2174,7 +2173,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte, * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, orig_pte))) { + if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter_fast(mm, @@ -2184,7 +2183,7 @@ static int wp_page_copy(struct vm_fault *vmf, pte_t orig_pte, } else { inc_mm_counter_fast(mm, MM_ANONPAGES); } - flush_cache_page(vma, vmf->address, pte_pfn(orig_pte)); + flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = mk_pte(new_page, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); /* @@ -2268,7 +2267,7 @@ oom: * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping */ -static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) +static int wp_pfn_shared(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -2286,16 +2285,15 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte) * We might have raced with another page fault while we * released the pte_offset_map_lock. */ - if (!pte_same(*vmf->pte, orig_pte)) { + if (!pte_same(*vmf->pte, vmf->orig_pte)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } } - return wp_page_reuse(vmf, orig_pte, NULL, 0, 0); + return wp_page_reuse(vmf, NULL, 0, 0); } -static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte, - struct page *old_page) +static int wp_page_shared(struct vm_fault *vmf, struct page *old_page) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; @@ -2321,7 +2319,7 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte, */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (!pte_same(*vmf->pte, orig_pte)) { + if (!pte_same(*vmf->pte, vmf->orig_pte)) { unlock_page(old_page); pte_unmap_unlock(vmf->pte, vmf->ptl); put_page(old_page); @@ -2330,7 +2328,7 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte, page_mkwrite = 1; } - return wp_page_reuse(vmf, orig_pte, old_page, page_mkwrite, 1); + return wp_page_reuse(vmf, old_page, page_mkwrite, 1); } /* @@ -2351,13 +2349,13 @@ static int wp_page_shared(struct vm_fault *vmf, pte_t orig_pte, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_sem still held, but pte unmapped and unlocked. 
*/ -static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) +static int do_wp_page(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; struct page *old_page; - old_page = vm_normal_page(vma, vmf->address, orig_pte); + old_page = vm_normal_page(vma, vmf->address, vmf->orig_pte); if (!old_page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a @@ -2368,10 +2366,10 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) - return wp_pfn_shared(vmf, orig_pte); + return wp_pfn_shared(vmf); pte_unmap_unlock(vmf->pte, vmf->ptl); - return wp_page_copy(vmf, orig_pte, old_page); + return wp_page_copy(vmf, old_page); } /* @@ -2386,7 +2384,7 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) lock_page(old_page); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (!pte_same(*vmf->pte, orig_pte)) { + if (!pte_same(*vmf->pte, vmf->orig_pte)) { unlock_page(old_page); pte_unmap_unlock(vmf->pte, vmf->ptl); put_page(old_page); @@ -2406,12 +2404,12 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) page_move_anon_rmap(old_page, vma); } unlock_page(old_page); - return wp_page_reuse(vmf, orig_pte, old_page, 0, 0); + return wp_page_reuse(vmf, old_page, 0, 0); } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { - return wp_page_shared(vmf, orig_pte, old_page); + return wp_page_shared(vmf, old_page); } /* @@ -2420,7 +2418,7 @@ static int do_wp_page(struct vm_fault *vmf, pte_t orig_pte) get_page(old_page); pte_unmap_unlock(vmf->pte, vmf->ptl); - return wp_page_copy(vmf, orig_pte, old_page); + return wp_page_copy(vmf, old_page); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, @@ -2508,7 +2506,7 @@ EXPORT_SYMBOL(unmap_mapping_range); * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). 
*/ -int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) +int do_swap_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page, *swapcache; @@ -2519,10 +2517,10 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) int exclusive = 0; int ret = 0; - if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, orig_pte)) + if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) goto out; - entry = pte_to_swp_entry(orig_pte); + entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { migration_entry_wait(vma->vm_mm, vmf->pmd, @@ -2530,7 +2528,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else { - print_bad_pte(vma, vmf->address, orig_pte, NULL); + print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; @@ -2547,7 +2545,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, orig_pte))) + if (likely(pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); goto unlock; @@ -2604,7 +2602,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, orig_pte))) + if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) goto out_nomap; if (unlikely(!PageUptodate(page))) { @@ -2632,9 +2630,10 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) exclusive = RMAP_EXCLUSIVE; } flush_icache_page(vma, page); - if (pte_swp_soft_dirty(orig_pte)) + if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); + vmf->orig_pte = pte; if (page == swapcache) { do_page_add_anon_rmap(page, vma, vmf->address, exclusive); mem_cgroup_commit_charge(page, memcg, true, false); @@ -2664,7 +2663,7 @@ int do_swap_page(struct vm_fault *vmf, pte_t orig_pte) } if (vmf->flags & FAULT_FLAG_WRITE) { - ret |= do_wp_page(vmf, pte); + ret |= do_wp_page(vmf); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; @@ -3363,7 +3362,7 @@ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, return mpol_misplaced(page, vma, addr); } -static int do_numa_page(struct vm_fault *vmf, pte_t pte) +static int do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page = NULL; @@ -3371,6 +3370,7 @@ static int do_numa_page(struct vm_fault *vmf, pte_t pte) int last_cpupid; int target_nid; bool migrated = false; + pte_t pte = vmf->orig_pte; bool was_writable = pte_write(pte); int flags = 0; @@ -3521,8 +3521,7 @@ static int handle_pte_fault(struct vm_fault *vmf) * So now it's safe to run pte_offset_map(). */ vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - - entry = *vmf->pte; + vmf->orig_pte = *vmf->pte; /* * some architectures can have larger ptes than wordsize, @@ -3533,7 +3532,7 @@ static int handle_pte_fault(struct vm_fault *vmf) * ptl lock held. So here a barrier will do. 
*/ barrier(); - if (pte_none(entry)) { + if (pte_none(vmf->orig_pte)) { pte_unmap(vmf->pte); vmf->pte = NULL; } @@ -3546,19 +3545,20 @@ static int handle_pte_fault(struct vm_fault *vmf) return do_fault(vmf); } - if (!pte_present(entry)) - return do_swap_page(vmf, entry); + if (!pte_present(vmf->orig_pte)) + return do_swap_page(vmf); - if (pte_protnone(entry) && vma_is_accessible(vmf->vma)) - return do_numa_page(vmf, entry); + if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) + return do_numa_page(vmf); vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); + entry = vmf->orig_pte; if (unlikely(!pte_same(*vmf->pte, entry))) goto unlock; if (vmf->flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) - return do_wp_page(vmf, entry); + return do_wp_page(vmf); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); -- cgit v1.2.3 From 3917048d4572b9cabf6f8f5ad395eb693717367c Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:18 -0800 Subject: mm: allow full handling of COW faults in ->fault handlers Patch series "dax: Clear dirty bits after flushing caches", v5. This patchset clears dirty bits from the radix tree of DAX inodes once the caches for the corresponding pfns have been flushed. In principle, these patches enable handlers to easily update PTEs and do other work necessary to finish the fault without duplicating the functionality present in the generic code. I'd like to thank Kirill and Ross for reviews of the series! This patch (of 20): To allow full handling of COW faults, add a memcg field to struct vm_fault and a ->fault() handler return value meaning that the COW fault is fully handled and the memcg charge must not be canceled. This will allow us to remove knowledge about special DAX locking from the generic fault code. Link: http://lkml.kernel.org/r/1479460644-25076-9-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A.
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 +++- mm/memory.c | 14 +++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 75fda0de64bf..39c17a2efcea 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -301,7 +301,8 @@ struct vm_fault { * the 'address' */ pte_t orig_pte; /* Value of PTE at the time of fault */ - struct page *cow_page; /* Handler may choose to COW */ + struct page *cow_page; /* Page handler may use for COW fault */ + struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by @@ -1103,6 +1104,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ #define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ +#define VM_FAULT_DONE_COW 0x2000 /* ->fault has fully handled COW */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ diff --git a/mm/memory.c b/mm/memory.c index cf74f7ca911b..02504cd4ca0e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2844,9 +2844,8 @@ static int __do_fault(struct vm_fault *vmf) int ret; ret = vma->vm_ops->fault(vma, vmf); - if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) - return ret; - if (ret & VM_FAULT_DAX_LOCKED) + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | + VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW))) return ret; if (unlikely(PageHWPoison(vmf->page))) { @@ -3226,7 +3225,6 @@ static int do_read_fault(struct vm_fault *vmf) static int do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct mem_cgroup *memcg; int ret; if (unlikely(anon_vma_prepare(vma))) @@ -3237,7 +3235,7 @@ static int do_cow_fault(struct vm_fault *vmf) return VM_FAULT_OOM; if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, - &memcg, false)) { + &vmf->memcg, false)) { put_page(vmf->cow_page); return VM_FAULT_OOM; } @@ -3245,12 +3243,14 @@ static int do_cow_fault(struct vm_fault *vmf) ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; + if (ret & VM_FAULT_DONE_COW) + return ret; if (!(ret & VM_FAULT_DAX_LOCKED)) copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); __SetPageUptodate(vmf->cow_page); - ret |= alloc_set_pte(vmf, memcg, vmf->cow_page); + ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { @@ -3263,7 +3263,7 @@ static int do_cow_fault(struct vm_fault *vmf) goto uncharge_out; return ret; uncharge_out: - mem_cgroup_cancel_charge(vmf->cow_page, memcg, false); + mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false); put_page(vmf->cow_page); return ret; } -- cgit v1.2.3 From 9118c0cbd44262d0015568266f314e645ed6b9ce Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:21 -0800 Subject: mm: factor out functionality to finish page faults Introduce finish_fault() as a helper function for finishing page faults. It is a rather thin wrapper around alloc_set_pte(), but since we'd want to call this from DAX code or filesystems, it is still useful to avoid some boilerplate code.
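To illustrate the intended calling convention, a read-fault path built on the helper looks roughly like this (a minimal sketch only: example_read_fault() is a hypothetical name, and the logic simply mirrors the do_read_fault() conversion in the diff below):

static int example_read_fault(struct vm_fault *vmf)
{
	int ret;

	/* Ask the ->fault() handler to prepare and lock vmf->page */
	ret = __do_fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/* Map the prepared page; finish_fault() also drops the PTE lock */
	ret |= finish_fault(vmf);
	unlock_page(vmf->page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		put_page(vmf->page);
	return ret;
}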
Link: http://lkml.kernel.org/r/1479460644-25076-10-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/memory.c | 44 +++++++++++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 6e25f4916d6f..60a230e6ece7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -620,6 +620,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page); +int finish_fault(struct vm_fault *vmf); #endif /* diff --git a/mm/memory.c b/mm/memory.c index 02504cd4ca0e..22f7f6e38515 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3074,6 +3074,38 @@ fault_handled: return ret; } + +/** + * finish_fault - finish page fault once we have prepared the page to fault + * + * @vmf: structure describing the fault + * + * This function handles all that is needed to finish a page fault once the + * page to fault in is prepared. It handles locking of PTEs, inserts PTE for + * given page, adds reverse page mapping, handles memcg charges and LRU + * addition. The function returns 0 on success, VM_FAULT_ code in case of + * error. + * + * The function expects the page to be locked and on success it consumes a + * reference of a page being mapped (for the PTE which maps it). + */ +int finish_fault(struct vm_fault *vmf) +{ + struct page *page; + int ret; + + /* Did we COW the page? */ + if ((vmf->flags & FAULT_FLAG_WRITE) && + !(vmf->vma->vm_flags & VM_SHARED)) + page = vmf->cow_page; + else + page = vmf->page; + ret = alloc_set_pte(vmf, vmf->memcg, page); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); + return ret; +} + static unsigned long fault_around_bytes __read_mostly = rounddown_pow_of_two(65536); @@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf) if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - ret |= alloc_set_pte(vmf, NULL, vmf->page); - if (vmf->pte) - pte_unmap_unlock(vmf->pte, vmf->ptl); + ret |= finish_fault(vmf); unlock_page(vmf->page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) put_page(vmf->page); @@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf) copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); __SetPageUptodate(vmf->cow_page); - ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page); - if (vmf->pte) - pte_unmap_unlock(vmf->pte, vmf->ptl); + ret |= finish_fault(vmf); if (!(ret & VM_FAULT_DAX_LOCKED)) { unlock_page(vmf->page); put_page(vmf->page); @@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf) } } - ret |= alloc_set_pte(vmf, NULL, vmf->page); - if (vmf->pte) - pte_unmap_unlock(vmf->pte, vmf->ptl); + ret |= finish_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) { unlock_page(vmf->page); -- cgit v1.2.3 From b1aa812b21084285e9f6098639be9cd5bf9e05d7 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:24 -0800 Subject: mm: move handling of COW faults into DAX code Move the final handling of COW faults from generic code into the DAX fault handler. That way generic code doesn't have to be aware of the peculiarities of DAX locking, so remove that knowledge and make the locking functions private to fs/dax.c.
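Concretely, a ->fault() handler that finishes the COW itself follows roughly this pattern (a hedged sketch distilled from the dax_iomap_fault() hunk below; copy_cow_data() and the out label are hypothetical stand-ins for the handler-specific data copy and cleanup path):

	if (vmf->cow_page) {
		/* handler-specific copy of the data into the COW page */
		error = copy_cow_data(vmf);
		if (error)
			goto out;
		__SetPageUptodate(vmf->cow_page);
		/* finish_fault() maps vmf->cow_page and uses vmf->memcg */
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;	/* generic code skips its own COW copy */
		goto out;
	}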
Link: http://lkml.kernel.org/r/1479460644-25076-11-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Acked-by: Kirill A. Shutemov Reviewed-by: Ross Zwisler Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 61 +++++++++++++++++++++++++---------------------------- include/linux/dax.h | 7 ------ include/linux/mm.h | 9 +------- mm/memory.c | 13 ++++-------- 4 files changed, 34 insertions(+), 56 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index f1dfae64b11a..e83aa4077df4 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -240,6 +240,23 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, } } +static void dax_unlock_mapping_entry(struct address_space *mapping, + pgoff_t index) +{ + void *entry, **slot; + + spin_lock_irq(&mapping->tree_lock); + entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); + if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || + !slot_locked(mapping, slot))) { + spin_unlock_irq(&mapping->tree_lock); + return; + } + unlock_slot(mapping, slot); + spin_unlock_irq(&mapping->tree_lock); + dax_wake_mapping_entry_waiter(mapping, index, entry, false); +} + static void put_locked_mapping_entry(struct address_space *mapping, pgoff_t index, void *entry) { @@ -433,22 +450,6 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); } -void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index) -{ - void *entry, **slot; - - spin_lock_irq(&mapping->tree_lock); - entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); - if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || - !slot_locked(mapping, slot))) { - spin_unlock_irq(&mapping->tree_lock); - return; - } - unlock_slot(mapping, slot); - spin_unlock_irq(&mapping->tree_lock); - dax_wake_mapping_entry_waiter(mapping, index, entry, false); -} - /* * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree * entry to get unlocked before deleting it. 
@@ -500,10 +501,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry, /* This will replace locked radix tree entry with a hole page */ page = find_or_create_page(mapping, vmf->pgoff, vmf->gfp_mask | __GFP_ZERO); - if (!page) { - put_locked_mapping_entry(mapping, vmf->pgoff, entry); + if (!page) return VM_FAULT_OOM; - } vmf->page = page; return VM_FAULT_LOCKED; } @@ -954,7 +953,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap iomap = { 0 }; unsigned flags = IOMAP_FAULT; int error, major = 0; - int locked_status = 0; + int vmf_ret = 0; void *entry; /* @@ -1007,13 +1006,11 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (error) goto finish_iomap; - if (!radix_tree_exceptional_entry(entry)) { - vmf->page = entry; - locked_status = VM_FAULT_LOCKED; - } else { - vmf->entry = entry; - locked_status = VM_FAULT_DAX_LOCKED; - } + + __SetPageUptodate(vmf->cow_page); + vmf_ret = finish_fault(vmf); + if (!vmf_ret) + vmf_ret = VM_FAULT_DONE_COW; goto finish_iomap; } @@ -1030,7 +1027,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, case IOMAP_UNWRITTEN: case IOMAP_HOLE: if (!(vmf->flags & FAULT_FLAG_WRITE)) { - locked_status = dax_load_hole(mapping, entry, vmf); + vmf_ret = dax_load_hole(mapping, entry, vmf); break; } /*FALLTHRU*/ @@ -1042,7 +1039,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, finish_iomap: if (ops->iomap_end) { - if (error) { + if (error || (vmf_ret & VM_FAULT_ERROR)) { /* keep previous error */ ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, &iomap); @@ -1052,7 +1049,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, } } unlock_entry: - if (!locked_status || error) + if (vmf_ret != VM_FAULT_LOCKED || error) put_locked_mapping_entry(mapping, vmf->pgoff, entry); out: if (error == -ENOMEM) @@ -1060,9 +1057,9 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, /* -EBUSY is fine, somebody else faulted on the same PTE */ if (error < 0 && error != -EBUSY) return VM_FAULT_SIGBUS | major; - if (locked_status) { + if (vmf_ret) { WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */ - return locked_status; + return vmf_ret; } return VM_FAULT_NOPAGE | major; } diff --git a/include/linux/dax.h b/include/linux/dax.h index 0afade8bd3d7..f97bcfe79472 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -46,7 +46,6 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); -void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length); #else @@ -55,12 +54,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev, { return ERR_PTR(-ENXIO); } -/* Shouldn't ever be called when dax is disabled. */ -static inline void dax_unlock_mapping_entry(struct address_space *mapping, - pgoff_t index) -{ - BUG(); -} static inline int __dax_zero_page_range(struct block_device *bdev, sector_t sector, unsigned int offset, unsigned int length) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 60a230e6ece7..59a4da1742e5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -308,12 +308,6 @@ struct vm_fault { * is set (which is also implied by * VM_FAULT_ERROR). */ - void *entry; /* ->fault handler can alternatively - * return locked DAX entry. 
In that - * case handler should return - * VM_FAULT_DAX_LOCKED and fill in - * entry here. - */ /* These three entries are valid only while holding ptl lock */ pte_t *pte; /* Pointer to pte entry matching * the 'address'. NULL if the page @@ -1104,8 +1098,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ -#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */ -#define VM_FAULT_DONE_COW 0x2000 /* ->fault has fully handled COW */ +#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ diff --git a/mm/memory.c b/mm/memory.c index 22f7f6e38515..ca3b95fa5fd1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2845,7 +2845,7 @@ static int __do_fault(struct vm_fault *vmf) ret = vma->vm_ops->fault(vma, vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | - VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW))) + VM_FAULT_DONE_COW))) return ret; if (unlikely(PageHWPoison(vmf->page))) { @@ -3276,17 +3276,12 @@ static int do_cow_fault(struct vm_fault *vmf) if (ret & VM_FAULT_DONE_COW) return ret; - if (!(ret & VM_FAULT_DAX_LOCKED)) - copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); + copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); __SetPageUptodate(vmf->cow_page); ret |= finish_fault(vmf); - if (!(ret & VM_FAULT_DAX_LOCKED)) { - unlock_page(vmf->page); - put_page(vmf->page); - } else { - dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff); - } + unlock_page(vmf->page); + put_page(vmf->page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; return ret; -- cgit v1.2.3 From 97ba0c2b4b0994044e404b7a96fc92a2e0424534 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:27 -0800 Subject: mm: factor out common parts of write fault handling Currently we duplicate handling of shared write faults in wp_page_reuse() and do_shared_fault(). Factor them out into a common function. Link: http://lkml.kernel.org/r/1479460644-25076-12-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 78 +++++++++++++++++++++++++++++-------------------------------- 1 file changed, 37 insertions(+), 41 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index ca3b95fa5fd1..6fd827804bf5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2062,6 +2062,41 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, return ret; } +/* + * Handle dirtying of a page in shared file mapping on a write fault. + * + * The function expects the page to be locked and unlocks it. + */ +static void fault_dirty_shared_page(struct vm_area_struct *vma, + struct page *page) +{ + struct address_space *mapping; + bool dirtied; + bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; + + dirtied = set_page_dirty(page); + VM_BUG_ON_PAGE(PageAnon(page), page); + /* + * Take a local copy of the address_space - page.mapping may be zeroed + * by truncate after unlock_page(). The address_space itself remains + * pinned by vma->vm_file's reference. We rely on unlock_page()'s + * release semantics to prevent the compiler from undoing this copying. 
+ */ + mapping = page_rmapping(page); + unlock_page(page); + + if ((dirtied || page_mkwrite) && mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + + if (!page_mkwrite) + file_update_time(vma->vm_file); +} + /* * Handle write page faults for pages that can be reused in the current vma * @@ -2092,28 +2127,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page, pte_unmap_unlock(vmf->pte, vmf->ptl); if (dirty_shared) { - struct address_space *mapping; - int dirtied; - if (!page_mkwrite) lock_page(page); - dirtied = set_page_dirty(page); - VM_BUG_ON_PAGE(PageAnon(page), page); - mapping = page->mapping; - unlock_page(page); + fault_dirty_shared_page(vma, page); put_page(page); - - if ((dirtied || page_mkwrite) && mapping) { - /* - * Some device drivers do not set page.mapping - * but still dirty their pages - */ - balance_dirty_pages_ratelimited(mapping); - } - - if (!page_mkwrite) - file_update_time(vma->vm_file); } return VM_FAULT_WRITE; @@ -3294,8 +3312,6 @@ uncharge_out: static int do_shared_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct address_space *mapping; - int dirtied = 0; int ret, tmp; ret = __do_fault(vmf); @@ -3324,27 +3340,7 @@ static int do_shared_fault(struct vm_fault *vmf) return ret; } - if (set_page_dirty(vmf->page)) - dirtied = 1; - /* - * Take a local copy of the address_space - page.mapping may be zeroed - * by truncate after unlock_page(). The address_space itself remains - * pinned by vma->vm_file's reference. We rely on unlock_page()'s - * release semantics to prevent the compiler from undoing this copying. - */ - mapping = page_rmapping(vmf->page); - unlock_page(vmf->page); - if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { - /* - * Some device drivers do not set page.mapping but still - * dirty their pages - */ - balance_dirty_pages_ratelimited(mapping); - } - - if (!vma->vm_ops->page_mkwrite) - file_update_time(vma->vm_file); - + fault_dirty_shared_page(vma, vmf->page); return ret; } -- cgit v1.2.3 From 38b8cb7fbb892503fe9fcf748ebbed8c9fde7bf8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:30 -0800 Subject: mm: pass vm_fault structure into do_page_mkwrite() We will need more information in the ->page_mkwrite() helper for DAX to be able to fully finish faults there. Pass vm_fault structure to do_page_mkwrite() and use it there so that information propagates properly from upper layers. Link: http://lkml.kernel.org/r/1479460644-25076-13-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 6fd827804bf5..e8a527885e8b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2034,20 +2034,17 @@ static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) * * We do this without the lock held, so that it can sleep if it needs to. 
*/ -static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, - unsigned long address) +static int do_page_mkwrite(struct vm_fault *vmf) { - struct vm_fault vmf; int ret; + struct page *page = vmf->page; + unsigned int old_flags = vmf->flags; - vmf.address = address & PAGE_MASK; - vmf.pgoff = page->index; - vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; - vmf.gfp_mask = __get_fault_gfp_mask(vma); - vmf.page = page; - vmf.cow_page = NULL; + vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; - ret = vma->vm_ops->page_mkwrite(vma, &vmf); + ret = vmf->vma->vm_ops->page_mkwrite(vmf->vma, vmf); + /* Restore original flags so that caller is not surprised */ + vmf->flags = old_flags; if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) return ret; if (unlikely(!(ret & VM_FAULT_LOCKED))) { @@ -2323,7 +2320,8 @@ static int wp_page_shared(struct vm_fault *vmf, struct page *old_page) int tmp; pte_unmap_unlock(vmf->pte, vmf->ptl); - tmp = do_page_mkwrite(vma, old_page, vmf->address); + vmf->page = old_page; + tmp = do_page_mkwrite(vmf); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(old_page); @@ -3324,7 +3322,7 @@ static int do_shared_fault(struct vm_fault *vmf) */ if (vma->vm_ops->page_mkwrite) { unlock_page(vmf->page); - tmp = do_page_mkwrite(vma, vmf->page, vmf->address); + tmp = do_page_mkwrite(vmf); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(vmf->page); -- cgit v1.2.3 From a41b70d6dfc28b9e1a17c2a9f3181c2b614bfd54 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:33 -0800 Subject: mm: use vmf->page during WP faults So far we set vmf->page during WP faults only when we needed to pass it to the ->page_mkwrite handler. Set it in all the cases now and use that instead of passing page pointer explicitly around. Link: http://lkml.kernel.org/r/1479460644-25076-14-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 58 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index e8a527885e8b..ad452898e6c0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2102,11 +2102,12 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma, * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page, +static inline int wp_page_reuse(struct vm_fault *vmf, int page_mkwrite, int dirty_shared) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; + struct page *page = vmf->page; pte_t entry; /* * Clear the pages cpupid information as the existing @@ -2150,10 +2151,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page, * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. 
*/ -static int wp_page_copy(struct vm_fault *vmf, struct page *old_page) +static int wp_page_copy(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; + struct page *old_page = vmf->page; struct page *new_page = NULL; pte_t entry; int page_copied = 0; @@ -2305,26 +2307,25 @@ static int wp_pfn_shared(struct vm_fault *vmf) return 0; } } - return wp_page_reuse(vmf, NULL, 0, 0); + return wp_page_reuse(vmf, 0, 0); } -static int wp_page_shared(struct vm_fault *vmf, struct page *old_page) +static int wp_page_shared(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; int page_mkwrite = 0; - get_page(old_page); + get_page(vmf->page); if (vma->vm_ops && vma->vm_ops->page_mkwrite) { int tmp; pte_unmap_unlock(vmf->pte, vmf->ptl); - vmf->page = old_page; tmp = do_page_mkwrite(vmf); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { - put_page(old_page); + put_page(vmf->page); return tmp; } /* @@ -2336,15 +2337,15 @@ static int wp_page_shared(struct vm_fault *vmf, struct page *old_page) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!pte_same(*vmf->pte, vmf->orig_pte)) { - unlock_page(old_page); + unlock_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - put_page(old_page); + put_page(vmf->page); return 0; } page_mkwrite = 1; } - return wp_page_reuse(vmf, old_page, page_mkwrite, 1); + return wp_page_reuse(vmf, page_mkwrite, 1); } /* @@ -2369,10 +2370,9 @@ static int do_wp_page(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; - struct page *old_page; - old_page = vm_normal_page(vma, vmf->address, vmf->orig_pte); - if (!old_page) { + vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); + if (!vmf->page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a * VM_PFNMAP VMA. @@ -2385,30 +2385,30 @@ static int do_wp_page(struct vm_fault *vmf) return wp_pfn_shared(vmf); pte_unmap_unlock(vmf->pte, vmf->ptl); - return wp_page_copy(vmf, old_page); + return wp_page_copy(vmf); } /* * Take out anonymous pages first, anonymous shared vmas are * not dirty accountable. */ - if (PageAnon(old_page) && !PageKsm(old_page)) { + if (PageAnon(vmf->page) && !PageKsm(vmf->page)) { int total_mapcount; - if (!trylock_page(old_page)) { - get_page(old_page); + if (!trylock_page(vmf->page)) { + get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - lock_page(old_page); + lock_page(vmf->page); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!pte_same(*vmf->pte, vmf->orig_pte)) { - unlock_page(old_page); + unlock_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - put_page(old_page); + put_page(vmf->page); return 0; } - put_page(old_page); + put_page(vmf->page); } - if (reuse_swap_page(old_page, &total_mapcount)) { + if (reuse_swap_page(vmf->page, &total_mapcount)) { if (total_mapcount == 1) { /* * The page is all ours. Move it to @@ -2417,24 +2417,24 @@ static int do_wp_page(struct vm_fault *vmf) * Protected against the rmap code by * the page lock. */ - page_move_anon_rmap(old_page, vma); + page_move_anon_rmap(vmf->page, vma); } - unlock_page(old_page); - return wp_page_reuse(vmf, old_page, 0, 0); + unlock_page(vmf->page); + return wp_page_reuse(vmf, 0, 0); } - unlock_page(old_page); + unlock_page(vmf->page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { - return wp_page_shared(vmf, old_page); + return wp_page_shared(vmf); } /* * Ok, we need to copy. 
Oh, well.. */ - get_page(old_page); + get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - return wp_page_copy(vmf, old_page); + return wp_page_copy(vmf); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, -- cgit v1.2.3 From 997dd98dd68beb2aea74cac53e7fd440cc8dba68 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:36 -0800 Subject: mm: move part of wp_page_reuse() into the single call site wp_page_reuse() handles write shared faults which is needed only in wp_page_shared(). Move the handling only into that location to make wp_page_reuse() simpler and avoid a strange situation when we sometimes pass in locked page, sometimes unlocked etc. Link: http://lkml.kernel.org/r/1479460644-25076-15-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index ad452898e6c0..82e7689e3059 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2102,8 +2102,7 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma, * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline int wp_page_reuse(struct vm_fault *vmf, - int page_mkwrite, int dirty_shared) +static inline void wp_page_reuse(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; @@ -2123,16 +2122,6 @@ static inline int wp_page_reuse(struct vm_fault *vmf, if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); - - if (dirty_shared) { - if (!page_mkwrite) - lock_page(page); - - fault_dirty_shared_page(vma, page); - put_page(page); - } - - return VM_FAULT_WRITE; } /* @@ -2307,7 +2296,8 @@ static int wp_pfn_shared(struct vm_fault *vmf) return 0; } } - return wp_page_reuse(vmf, 0, 0); + wp_page_reuse(vmf); + return VM_FAULT_WRITE; } static int wp_page_shared(struct vm_fault *vmf) @@ -2345,7 +2335,13 @@ static int wp_page_shared(struct vm_fault *vmf) page_mkwrite = 1; } - return wp_page_reuse(vmf, page_mkwrite, 1); + wp_page_reuse(vmf); + if (!page_mkwrite) + lock_page(vmf->page); + fault_dirty_shared_page(vma, vmf->page); + put_page(vmf->page); + + return VM_FAULT_WRITE; } /* @@ -2420,7 +2416,8 @@ static int do_wp_page(struct vm_fault *vmf) page_move_anon_rmap(vmf->page, vma); } unlock_page(vmf->page); - return wp_page_reuse(vmf, 0, 0); + wp_page_reuse(vmf); + return VM_FAULT_WRITE; } unlock_page(vmf->page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == -- cgit v1.2.3 From 66a6197c118540d454913eef24d68d7491ab5d5f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:39 -0800 Subject: mm: provide helper for finishing mkwrite faults Provide a helper function for finishing write faults due to PTE being read-only. The helper will be used by DAX to avoid the need of complicating generic MM code with DAX locking specifics. Link: http://lkml.kernel.org/r/1479460644-25076-16-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Acked-by: Kirill A. 
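A rough sketch of how the new helper is meant to be consumed, in the style DAX adopts two patches later in this series (the handler below is hypothetical; only finish_mkwrite_fault() itself comes from this patch):

        static int my_pfn_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
        {
                /* take a subsystem-specific lock that stabilizes the pfn */
                /*
                 * Retake the PTE lock, revalidate the PTE against
                 * vmf->orig_pte and mark it writeable.  Losing the race is
                 * harmless, the fault is simply retried.
                 */
                finish_mkwrite_fault(vmf);
                /* drop the subsystem-specific lock */
                return VM_FAULT_NOPAGE;
        }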
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/memory.c | 67 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 59a4da1742e5..cec967e93f95 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -615,6 +615,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, struct page *page); int finish_fault(struct vm_fault *vmf); +int finish_mkwrite_fault(struct vm_fault *vmf); #endif /* diff --git a/mm/memory.c b/mm/memory.c index 82e7689e3059..bbc25da48a18 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2269,6 +2269,38 @@ oom: return VM_FAULT_OOM; } +/** + * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE + * writeable once the page is prepared + * + * @vmf: structure describing the fault + * + * This function handles all that is needed to finish a write page fault in a + * shared mapping due to PTE being read-only once the mapped page is prepared. + * It handles locking of PTE and modifying it. The function returns + * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE + * lock. + * + * The function expects the page to be locked or other protection against + * concurrent faults / writeback (such as DAX radix tree locks). + */ +int finish_mkwrite_fault(struct vm_fault *vmf) +{ + WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); + /* + * We might have raced with another page fault while we released the + * pte_offset_map_lock. + */ + if (!pte_same(*vmf->pte, vmf->orig_pte)) { + pte_unmap_unlock(vmf->pte, vmf->ptl); + return 0; + } + wp_page_reuse(vmf); + return VM_FAULT_WRITE; +} + /* * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping @@ -2285,16 +2317,7 @@ static int wp_pfn_shared(struct vm_fault *vmf) ret = vma->vm_ops->pfn_mkwrite(vma, vmf); if (ret & VM_FAULT_ERROR) return ret; - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); - /* - * We might have raced with another page fault while we - * released the pte_offset_map_lock. - */ - if (!pte_same(*vmf->pte, vmf->orig_pte)) { - pte_unmap_unlock(vmf->pte, vmf->ptl); - return 0; - } + return finish_mkwrite_fault(vmf); } wp_page_reuse(vmf); return VM_FAULT_WRITE; @@ -2304,7 +2327,6 @@ static int wp_page_shared(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; - int page_mkwrite = 0; get_page(vmf->page); @@ -2318,26 +2340,17 @@ static int wp_page_shared(struct vm_fault *vmf) put_page(vmf->page); return tmp; } - /* - * Since we dropped the lock we need to revalidate - * the PTE as someone else may have changed it. If - * they did, we just return, as we can count on the - * MMU to tell us if they didn't also make it writable. 
- */ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); - if (!pte_same(*vmf->pte, vmf->orig_pte)) { + tmp = finish_mkwrite_fault(vmf); + if (unlikely(!tmp || (tmp & + (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { unlock_page(vmf->page); - pte_unmap_unlock(vmf->pte, vmf->ptl); put_page(vmf->page); - return 0; + return tmp; } - page_mkwrite = 1; - } - - wp_page_reuse(vmf); - if (!page_mkwrite) + } else { + wp_page_reuse(vmf); lock_page(vmf->page); + } fault_dirty_shared_page(vma, vmf->page); put_page(vmf->page); -- cgit v1.2.3 From a19e25536ed3a20845f642ce531e10c27fb2add5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:42 -0800 Subject: mm: change return values of finish_mkwrite_fault() Currently finish_mkwrite_fault() returns 0 when PTE got changed before we acquired PTE lock and VM_FAULT_WRITE when we succeeded in modifying the PTE. This is somewhat confusing since 0 generally means success, it is also inconsistent with finish_fault() which returns 0 on success. Change finish_mkwrite_fault() to return 0 on success and VM_FAULT_NOPAGE when PTE changed. Practically, there should be no behavioral difference since we bail out from the fault the same way regardless whether we return 0, VM_FAULT_NOPAGE, or VM_FAULT_WRITE. Also note that VM_FAULT_WRITE has no effect for shared mappings since the only two places that check it - KSM and GUP - care about private mappings only. Generally the meaning of VM_FAULT_WRITE for shared mappings is not well defined and we should probably clean that up. Link: http://lkml.kernel.org/r/1479460644-25076-17-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Acked-by: Kirill A. Shutemov Cc: Ross Zwisler Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index bbc25da48a18..8b7f0656a921 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2295,10 +2295,10 @@ int finish_mkwrite_fault(struct vm_fault *vmf) */ if (!pte_same(*vmf->pte, vmf->orig_pte)) { pte_unmap_unlock(vmf->pte, vmf->ptl); - return 0; + return VM_FAULT_NOPAGE; } wp_page_reuse(vmf); - return VM_FAULT_WRITE; + return 0; } /* @@ -2341,8 +2341,7 @@ static int wp_page_shared(struct vm_fault *vmf) return tmp; } tmp = finish_mkwrite_fault(vmf); - if (unlikely(!tmp || (tmp & - (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { + if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { unlock_page(vmf->page); put_page(vmf->page); return tmp; -- cgit v1.2.3 From cae1240257d9ba4b40eb240124c530de8ee349bc Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:45 -0800 Subject: mm: export follow_pte() DAX will need to implement its own version of page_check_address(). To avoid duplicating page table walking code, export follow_pte() which does what we need. Link: http://lkml.kernel.org/r/1479460644-25076-18-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Cc: Kirill A. 
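A short usage sketch of the now-exported helper (variable names are illustrative): on success follow_pte() returns 0 with the PTE mapped and its lock held, so the caller can examine or rewrite the entry race-free and must then unlock. This is the pattern the DAX mkclean code later in this series relies on.

        pte_t *ptep;
        spinlock_t *ptl;

        if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                return;                 /* no PTE is mapped at address */
        if (pte_write(*ptep) || pte_dirty(*ptep)) {
                /* ... inspect or modify the PTE while holding ptl ... */
        }
        pte_unmap_unlock(ptep, ptl);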
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ mm/memory.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index cec967e93f95..63926492c06a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1210,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, + spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, diff --git a/mm/memory.c b/mm/memory.c index 8b7f0656a921..edd899d0decb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3817,8 +3817,8 @@ out: return -EINVAL; } -static inline int follow_pte(struct mm_struct *mm, unsigned long address, - pte_t **ptepp, spinlock_t **ptlp) +int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, + spinlock_t **ptlp) { int res; -- cgit v1.2.3 From a6abc2c0e77b16480f4d2c1eb7925e5287ae1526 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:47 -0800 Subject: dax: make cache flushing protected by entry lock Currently, flushing of caches for DAX mappings was ignoring entry lock. So far this was ok (modulo a bug that a difference in entry lock could cause cache flushing to be mistakenly skipped) but in the following patches we will write-protect PTEs on cache flushing and clear dirty tags. For that we will need more exclusion. So do cache flushing under an entry lock. This allows us to remove one lock-unlock pair of mapping->tree_lock as a bonus. Link: http://lkml.kernel.org/r/1479460644-25076-19-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Cc: Kirill A. Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 61 +++++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index e83aa4077df4..df5c0daba698 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -618,32 +618,50 @@ static int dax_writeback_one(struct block_device *bdev, struct address_space *mapping, pgoff_t index, void *entry) { struct radix_tree_root *page_tree = &mapping->page_tree; - struct radix_tree_node *node; struct blk_dax_ctl dax; - void **slot; + void *entry2, **slot; int ret = 0; - spin_lock_irq(&mapping->tree_lock); /* - * Regular page slots are stabilized by the page lock even - * without the tree itself locked. These unlocked entries - * need verification under the tree lock. + * A page got tagged dirty in DAX mapping? Something is seriously + * wrong. */ - if (!__radix_tree_lookup(page_tree, index, &node, &slot)) - goto unlock; - if (*slot != entry) - goto unlock; - - /* another fsync thread may have already written back this entry */ - if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) - goto unlock; + if (WARN_ON(!radix_tree_exceptional_entry(entry))) + return -EIO; + spin_lock_irq(&mapping->tree_lock); + entry2 = get_unlocked_mapping_entry(mapping, index, &slot); + /* Entry got punched out / reallocated? */ + if (!entry2 || !radix_tree_exceptional_entry(entry2)) + goto put_unlocked; + /* + * Entry got reallocated elsewhere? No need to writeback. 
We have to + * compare sectors as we must not bail out due to difference in lockbit + * or entry type. + */ + if (dax_radix_sector(entry2) != dax_radix_sector(entry)) + goto put_unlocked; if (WARN_ON_ONCE(dax_is_empty_entry(entry) || dax_is_zero_entry(entry))) { ret = -EIO; - goto unlock; + goto put_unlocked; } + /* Another fsync thread may have already written back this entry */ + if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) + goto put_unlocked; + /* Lock the entry to serialize with page faults */ + entry = lock_slot(mapping, slot); + /* + * We can clear the tag now but we have to be careful so that concurrent + * dax_writeback_one() calls for the same index cannot finish before we + * actually flush the caches. This is achieved as the calls will look + * at the entry only under tree_lock and once they do that they will + * see the entry locked and wait for it to unlock. + */ + radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE); + spin_unlock_irq(&mapping->tree_lock); + /* * Even if dax_writeback_mapping_range() was given a wbc->range_start * in the middle of a PMD, the 'index' we are given will be aligned to @@ -653,15 +671,16 @@ static int dax_writeback_one(struct block_device *bdev, */ dax.sector = dax_radix_sector(entry); dax.size = PAGE_SIZE << dax_radix_order(entry); - spin_unlock_irq(&mapping->tree_lock); /* * We cannot hold tree_lock while calling dax_map_atomic() because it * eventually calls cond_resched(). */ ret = dax_map_atomic(bdev, &dax); - if (ret < 0) + if (ret < 0) { + put_locked_mapping_entry(mapping, index, entry); return ret; + } if (WARN_ON_ONCE(ret < dax.size)) { ret = -EIO; @@ -669,15 +688,13 @@ static int dax_writeback_one(struct block_device *bdev, } wb_cache_pmem(dax.addr, dax.size); - - spin_lock_irq(&mapping->tree_lock); - radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE); - spin_unlock_irq(&mapping->tree_lock); unmap: dax_unmap_atomic(bdev, &dax); + put_locked_mapping_entry(mapping, index, entry); return ret; - unlock: + put_unlocked: + put_unlocked_mapping_entry(mapping, index, entry2); spin_unlock_irq(&mapping->tree_lock); return ret; } -- cgit v1.2.3 From 2f89dc12a25ddf995b9acd7b6543fe892e3473d6 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:50 -0800 Subject: dax: protect PTE modification on WP fault by radix tree entry lock Currently PTE gets updated in wp_pfn_shared() after dax_pfn_mkwrite() has released corresponding radix tree entry lock. When we want to writeprotect PTE on cache flush, we need PTE modification to happen under radix tree entry lock to ensure consistent updates of PTE and radix tree (standard faults use page lock to ensure this consistency). So move update of PTE bit into dax_pfn_mkwrite(). Link: http://lkml.kernel.org/r/1479460644-25076-20-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Cc: Kirill A. 
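The interleaving this closes, sketched from the commit message (an illustration, not a literal trace): once the next patches let writeback clear dirty tags and write-protect PTEs, updating the PTE outside the entry lock could leave a writeable PTE behind a clean radix tree entry.

        /*
         *  CPU0: write fault                CPU1: dax_writeback_one()
         *  dax_pfn_mkwrite()
         *    set DIRTY tag, unlock entry
         *                                   lock entry, flush caches,
         *                                   clear DIRTY, wrprotect PTE
         *  wp_pfn_shared()
         *    make PTE writeable   <-- writeable PTE, clean entry
         *
         * Calling finish_mkwrite_fault() from dax_pfn_mkwrite() with the
         * entry lock held makes the PTE and tag updates appear atomic to
         * writeback.
         */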
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 22 ++++++++++++++++------ mm/memory.c | 2 +- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index df5c0daba698..cf7a20a5858b 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -783,17 +783,27 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct address_space *mapping = file->f_mapping; - void *entry; + void *entry, **slot; pgoff_t index = vmf->pgoff; spin_lock_irq(&mapping->tree_lock); - entry = get_unlocked_mapping_entry(mapping, index, NULL); - if (!entry || !radix_tree_exceptional_entry(entry)) - goto out; + entry = get_unlocked_mapping_entry(mapping, index, &slot); + if (!entry || !radix_tree_exceptional_entry(entry)) { + if (entry) + put_unlocked_mapping_entry(mapping, index, entry); + spin_unlock_irq(&mapping->tree_lock); + return VM_FAULT_NOPAGE; + } radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY); - put_unlocked_mapping_entry(mapping, index, entry); -out: + entry = lock_slot(mapping, slot); spin_unlock_irq(&mapping->tree_lock); + /* + * If we race with somebody updating the PTE and finish_mkwrite_fault() + * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry + * the fault in either case. + */ + finish_mkwrite_fault(vmf); + put_locked_mapping_entry(mapping, index, entry); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); diff --git a/mm/memory.c b/mm/memory.c index edd899d0decb..57d0bd1bd2c4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2315,7 +2315,7 @@ static int wp_pfn_shared(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); vmf->flags |= FAULT_FLAG_MKWRITE; ret = vma->vm_ops->pfn_mkwrite(vma, vmf); - if (ret & VM_FAULT_ERROR) + if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) return ret; return finish_mkwrite_fault(vmf); } -- cgit v1.2.3 From 4b4bb46d00b386e1c972890dc5785a7966eaa9c0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Dec 2016 15:07:53 -0800 Subject: dax: clear dirty entry tags on cache flush Currently we never clear dirty tags in DAX mappings and thus address ranges to flush accumulate. Now that we have locking of radix tree entries, we have all the locking necessary to reliably clear the radix tree dirty tag when flushing caches for corresponding address range. Similarly to page_mkclean() we also have to write-protect pages to get a page fault when the page is next written to so that we can mark the entry dirty again. Link: http://lkml.kernel.org/r/1479460644-25076-21-git-send-email-jack@suse.cz Signed-off-by: Jan Kara Reviewed-by: Ross Zwisler Cc: Kirill A. 
Shutemov Cc: Dan Williams Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/fs/dax.c b/fs/dax.c index cf7a20a5858b..a8732fbed381 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "internal.h" @@ -614,6 +615,59 @@ static void *dax_insert_mapping_entry(struct address_space *mapping, return new_entry; } +static inline unsigned long +pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma) +{ + unsigned long address; + + address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); + return address; +} + +/* Walk all mappings of a given index of a file and writeprotect them */ +static void dax_mapping_entry_mkclean(struct address_space *mapping, + pgoff_t index, unsigned long pfn) +{ + struct vm_area_struct *vma; + pte_t *ptep; + pte_t pte; + spinlock_t *ptl; + bool changed; + + i_mmap_lock_read(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { + unsigned long address; + + cond_resched(); + + if (!(vma->vm_flags & VM_SHARED)) + continue; + + address = pgoff_address(index, vma); + changed = false; + if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) + continue; + if (pfn != pte_pfn(*ptep)) + goto unlock; + if (!pte_dirty(*ptep) && !pte_write(*ptep)) + goto unlock; + + flush_cache_page(vma, address, pfn); + pte = ptep_clear_flush(vma, address, ptep); + pte = pte_wrprotect(pte); + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, address, ptep, pte); + changed = true; +unlock: + pte_unmap_unlock(ptep, ptl); + + if (changed) + mmu_notifier_invalidate_page(vma->vm_mm, address); + } + i_mmap_unlock_read(mapping); +} + static int dax_writeback_one(struct block_device *bdev, struct address_space *mapping, pgoff_t index, void *entry) { @@ -687,7 +741,17 @@ static int dax_writeback_one(struct block_device *bdev, goto unmap; } + dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn)); wb_cache_pmem(dax.addr, dax.size); + /* + * After we have flushed the cache, we can clear the dirty tag. There + * cannot be new dirty data in the pfn after the flush has completed as + * the pfn mappings are writeprotected and fault waits for mapping + * entry lock. + */ + spin_lock_irq(&mapping->tree_lock); + radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY); + spin_unlock_irq(&mapping->tree_lock); unmap: dax_unmap_atomic(bdev, &dax); put_locked_mapping_entry(mapping, index, entry); -- cgit v1.2.3 From ebb9a9aedb95f697673a96a26ff0322a13676381 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:07:56 -0800 Subject: tools: add WARN_ON_ONCE Patch series "Radix tree patches for 4.10", v3. Mostly these are improvements; the only bug fixes in here relate to multiorder entries (which are unused in the 4.9 tree). This patch (of 32): The radix tree uses its own buggy WARN_ON_ONCE. Replace it with the definition from asm-generic/bug.h Link: http://lkml.kernel.org/r/1480369871-5271-37-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/include/asm/bug.h | 11 +++++++++++ tools/testing/radix-tree/Makefile | 2 +- tools/testing/radix-tree/linux/bug.h | 2 +- tools/testing/radix-tree/linux/types.h | 2 -- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h index 9e5f4846967f..beda1a884b50 100644 --- a/tools/include/asm/bug.h +++ b/tools/include/asm/bug.h @@ -12,6 +12,17 @@ unlikely(__ret_warn_on); \ }) +#define WARN_ON_ONCE(condition) ({ \ + static int __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) + #define WARN_ONCE(condition, format...) ({ \ static int __warned; \ int __ret_warn_once = !!(condition); \ diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index f2e07f2fd4b4..3c338dc3b162 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -1,5 +1,5 @@ -CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE +CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE LDFLAGS += -lpthread -lurcu TARGETS = main OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ diff --git a/tools/testing/radix-tree/linux/bug.h b/tools/testing/radix-tree/linux/bug.h index ccbe444977df..23b8ed52f8c8 100644 --- a/tools/testing/radix-tree/linux/bug.h +++ b/tools/testing/radix-tree/linux/bug.h @@ -1 +1 @@ -#define WARN_ON_ONCE(x) assert(x) +#include "asm/bug.h" diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h index faa0b6ff9ca8..8491d89873bb 100644 --- a/tools/testing/radix-tree/linux/types.h +++ b/tools/testing/radix-tree/linux/types.h @@ -6,8 +6,6 @@ #define __rcu #define __read_mostly -#define BITS_PER_LONG (sizeof(long) * 8) - static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list; -- cgit v1.2.3 From 31023cd66468359790beb046b6808fe0444672a2 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:07:59 -0800 Subject: radix tree test suite: allow GFP_ATOMIC allocations to fail In order to test the preload code, it is necessary to fail GFP_ATOMIC allocations, which requires defining GFP_KERNEL and GFP_ATOMIC properly. Remove the obsolete __GFP_WAIT and copy the definitions of the __GFP flags which are used from the kernel include files. We also need the real definition of gfpflags_allow_blocking() to persuade the radix tree to actually use its preallocated nodes. Link: http://lkml.kernel.org/r/1480369871-5271-38-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
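The point of letting GFP_ATOMIC fail is to exercise the kernel's preload pattern, where nodes are allocated up front in a blocking context and consumed later under a lock. A condensed sketch of that pattern (error handling trimmed):

        int err;

        if (radix_tree_preload(GFP_KERNEL))     /* may block, may fail */
                return -ENOMEM;
        spin_lock(&lock);
        err = radix_tree_insert(&tree, index, item); /* uses preloaded nodes */
        spin_unlock(&lock);
        radix_tree_preload_end();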
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/linux.c | 7 ++++++- tools/testing/radix-tree/linux/gfp.h | 22 +++++++++++++++++++--- tools/testing/radix-tree/linux/slab.h | 5 ----- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index 154823737b20..3cfb04e98e2f 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -33,7 +33,12 @@ mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) { - void *ret = malloc(cachep->size); + void *ret; + + if (flags & __GFP_NOWARN) + return NULL; + + ret = malloc(cachep->size); if (cachep->ctor) cachep->ctor(ret); uatomic_inc(&nr_allocated); diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index 5201b915f631..5b09b2ce6c33 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h @@ -3,8 +3,24 @@ #define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) -#define __GFP_WAIT 1 -#define __GFP_ACCOUNT 0 -#define __GFP_NOWARN 0 + +#define __GFP_HIGH 0x20u +#define __GFP_IO 0x40u +#define __GFP_FS 0x80u +#define __GFP_NOWARN 0x200u +#define __GFP_ATOMIC 0x80000u +#define __GFP_ACCOUNT 0x100000u +#define __GFP_DIRECT_RECLAIM 0x400000u +#define __GFP_KSWAPD_RECLAIM 0x2000000u + +#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) + +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) + +static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return !!(gfp_flags & __GFP_DIRECT_RECLAIM); +} #endif diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h index 6d5a34770fd4..452e2bf502e3 100644 --- a/tools/testing/radix-tree/linux/slab.h +++ b/tools/testing/radix-tree/linux/slab.h @@ -7,11 +7,6 @@ #define SLAB_PANIC 2 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ -static inline int gfpflags_allow_blocking(gfp_t mask) -{ - return 1; -} - struct kmem_cache { int size; void (*ctor)(void *); -- cgit v1.2.3 From 847d357635ce4c63b8901ab81333586a0f115fa5 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:02 -0800 Subject: radix tree test suite: track preempt_count Rather than simply NOP out preempt_enable() and preempt_disable(), keep track of preempt_count and display it regularly in case either the test suite or the code under test is forgetting to balance the enables & disables. Only found a test-case that was forgetting to re-enable preemption, but it's a possibility worth checking. Link: http://lkml.kernel.org/r/1480369871-5271-39-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
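What the counter catches, in miniature: a test that disables preemption and forgets to re-enable it now shows up in every subsequent status line instead of being silently absorbed by the old no-op stubs.

        preempt_disable();              /* preempt_count goes to 1 */
        /* ... test body ... */
        /* missing preempt_enable(): "preempt 1" is printed after every
         * following test, pointing at the unbalanced caller
         */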
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/linux.c | 1 + tools/testing/radix-tree/linux/preempt.h | 6 +++--- tools/testing/radix-tree/main.c | 30 ++++++++++++++++++++---------- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index 3cfb04e98e2f..1f32a16a3848 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -9,6 +9,7 @@ #include int nr_allocated; +int preempt_count; void *mempool_alloc(mempool_t *pool, int gfp_mask) { diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h index 6210672e3baa..65c04c226965 100644 --- a/tools/testing/radix-tree/linux/preempt.h +++ b/tools/testing/radix-tree/linux/preempt.h @@ -1,4 +1,4 @@ -/* */ +extern int preempt_count; -#define preempt_disable() do { } while (0) -#define preempt_enable() do { } while (0) +#define preempt_disable() uatomic_inc(&preempt_count) +#define preempt_enable() uatomic_dec(&preempt_count) diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index daa9010693e8..64ffe67605d4 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -293,27 +293,36 @@ static void single_thread_tests(bool long_run) { int i; - printf("starting single_thread_tests: %d allocated\n", nr_allocated); + printf("starting single_thread_tests: %d allocated, preempt %d\n", + nr_allocated, preempt_count); multiorder_checks(); - printf("after multiorder_check: %d allocated\n", nr_allocated); + printf("after multiorder_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); locate_check(); - printf("after locate_check: %d allocated\n", nr_allocated); + printf("after locate_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); tag_check(); - printf("after tag_check: %d allocated\n", nr_allocated); + printf("after tag_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); gang_check(); - printf("after gang_check: %d allocated\n", nr_allocated); + printf("after gang_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); add_and_check(); - printf("after add_and_check: %d allocated\n", nr_allocated); + printf("after add_and_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); dynamic_height_check(); - printf("after dynamic_height_check: %d allocated\n", nr_allocated); + printf("after dynamic_height_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); big_gang_check(long_run); - printf("after big_gang_check: %d allocated\n", nr_allocated); + printf("after big_gang_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); for (i = 0; i < (long_run ? 
2000 : 3); i++) { copy_tag_check(); printf("%d ", i); fflush(stdout); } - printf("after copy_tag_check: %d allocated\n", nr_allocated); + printf("after copy_tag_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); } int main(int argc, char **argv) @@ -336,7 +345,8 @@ int main(int argc, char **argv) single_thread_tests(long_run); sleep(1); - printf("after sleep(1): %d allocated\n", nr_allocated); + printf("after sleep(1): %d allocated, preempt %d\n", + nr_allocated, preempt_count); rcu_unregister_thread(); exit(0); -- cgit v1.2.3 From 6df5ee786786ddafdddc922344a0b789f5b25fa4 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:05 -0800 Subject: radix tree test suite: free preallocated nodes It can be a source of mild concern when the test suite shows that we're leaking nodes. While poring over the source code looking for leaks can lead to some fascinating bugs being discovered, sometimes the leak is simply that these nodes were preallocated and are sitting on the per-CPU list. Free them by calling the CPU dead callback. Link: http://lkml.kernel.org/r/1480369871-5271-40-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/main.c | 3 +++ tools/testing/radix-tree/test.h | 1 + 2 files changed, 4 insertions(+) diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 64ffe67605d4..52ce1eab0fd3 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -344,6 +344,9 @@ int main(int argc, char **argv) iteration_test(); single_thread_tests(long_run); + /* Free any remaining preallocated nodes */ + radix_tree_cpu_dead(0); + sleep(1); printf("after sleep(1): %d allocated, preempt %d\n", nr_allocated, preempt_count); diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 217fb2403f09..5d2fad05b263 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -44,3 +44,4 @@ void radix_tree_dump(struct radix_tree_root *root); int root_tag_get(struct radix_tree_root *root, unsigned int tag); unsigned long node_maxindex(struct radix_tree_node *); unsigned long shift_maxindex(unsigned int shift); +int radix_tree_cpu_dead(unsigned int cpu); -- cgit v1.2.3 From 061ef3936b16edc8f779d403d569392505665ed5 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:08 -0800 Subject: radix tree test suite: make runs more reproducible Instead of reseeding the random number generator every time around the loop in big_gang_check(), seed it at the beginning of execution. Use rand_r() and an independent base seed for each thread in iteration_test() so they don't stomp all over each others state. Since this particular test depends on the kernel scheduler, the iteration test can't be reproduced based purely on the random seed, but at least it won't pollute the other tests. Print the seed, and allow the seed to be specified so that a run which hits a problem can be reproduced. Link: http://lkml.kernel.org/r/1480369871-5271-41-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/iteration_check.c | 11 +++++++---- tools/testing/radix-tree/main.c | 9 +++++++-- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index 9adb8e7415a6..11d570c3fc83 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -20,6 +20,7 @@ #define TAG 0 static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; static pthread_t threads[NUM_THREADS]; +static unsigned int seeds[3]; RADIX_TREE(tree, GFP_KERNEL); bool test_complete; @@ -71,7 +72,7 @@ static void *tagged_iteration_fn(void *arg) continue; } - if (rand() % 50 == 0) + if (rand_r(&seeds[0]) % 50 == 0) slot = radix_tree_iter_next(&iter); } rcu_read_unlock(); @@ -111,7 +112,7 @@ static void *untagged_iteration_fn(void *arg) continue; } - if (rand() % 50 == 0) + if (rand_r(&seeds[1]) % 50 == 0) slot = radix_tree_iter_next(&iter); } rcu_read_unlock(); @@ -129,7 +130,7 @@ static void *remove_entries_fn(void *arg) while (!test_complete) { int pgoff; - pgoff = rand() % 100; + pgoff = rand_r(&seeds[2]) % 100; pthread_mutex_lock(&tree_lock); item_delete(&tree, pgoff); @@ -146,9 +147,11 @@ void iteration_test(void) printf("Running iteration tests for 10 seconds\n"); - srand(time(0)); test_complete = false; + for (i = 0; i < 3; i++) + seeds[i] = rand(); + if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) { perror("pthread_create"); exit(1); diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 52ce1eab0fd3..2eb694994497 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -67,7 +67,6 @@ void big_gang_check(bool long_run) for (i = 0; i < (long_run ? 1000 : 3); i++) { __big_gang_check(); - srand(time(0)); printf("%d ", i); fflush(stdout); } @@ -329,12 +328,18 @@ int main(int argc, char **argv) { bool long_run = false; int opt; + unsigned int seed = time(NULL); - while ((opt = getopt(argc, argv, "l")) != -1) { + while ((opt = getopt(argc, argv, "ls:")) != -1) { if (opt == 'l') long_run = true; + else if (opt == 's') + seed = strtoul(optarg, NULL, 0); } + printf("random seed %u\n", seed); + srand(seed); + rcu_register_thread(); radix_tree_init(); -- cgit v1.2.3 From ba20cd60c97945f0de9fe313f869b3a5855e1503 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:11 -0800 Subject: radix tree test suite: iteration test misuses RCU Each thread needs to register itself with RCU, otherwise the reading thread's read lock has no effect and the freeing thread will free the memory in the tree without waiting for the read lock to be dropped. Link: http://lkml.kernel.org/r/1480369871-5271-42-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
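The fix gives every worker the same shape; condensed, the rule liburcu imposes is the following (the full diff is below):

        static void *worker(void *arg)
        {
                rcu_register_thread();  /* else rcu_read_lock() is a no-op */
                while (!test_complete) {
                        rcu_read_lock();
                        /* ... iterate or modify the tree ... */
                        rcu_read_unlock();
                }
                rcu_unregister_thread();
                return NULL;
        }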
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/iteration_check.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index 11d570c3fc83..df71cb841385 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -29,6 +29,8 @@ static void *add_entries_fn(void *arg) { int pgoff; + rcu_register_thread(); + while (!test_complete) { for (pgoff = 0; pgoff < 100; pgoff++) { pthread_mutex_lock(&tree_lock); @@ -38,6 +40,8 @@ static void *add_entries_fn(void *arg) } } + rcu_unregister_thread(); + return NULL; } @@ -53,6 +57,8 @@ static void *tagged_iteration_fn(void *arg) struct radix_tree_iter iter; void **slot; + rcu_register_thread(); + while (!test_complete) { rcu_read_lock(); radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) { @@ -72,12 +78,18 @@ static void *tagged_iteration_fn(void *arg) continue; } - if (rand_r(&seeds[0]) % 50 == 0) + if (rand_r(&seeds[0]) % 50 == 0) { slot = radix_tree_iter_next(&iter); + rcu_read_unlock(); + rcu_barrier(); + rcu_read_lock(); + } } rcu_read_unlock(); } + rcu_unregister_thread(); + return NULL; } @@ -93,6 +105,8 @@ static void *untagged_iteration_fn(void *arg) struct radix_tree_iter iter; void **slot; + rcu_register_thread(); + while (!test_complete) { rcu_read_lock(); radix_tree_for_each_slot(slot, &tree, &iter, 0) { @@ -112,12 +126,18 @@ static void *untagged_iteration_fn(void *arg) continue; } - if (rand_r(&seeds[1]) % 50 == 0) + if (rand_r(&seeds[1]) % 50 == 0) { slot = radix_tree_iter_next(&iter); + rcu_read_unlock(); + rcu_barrier(); + rcu_read_lock(); + } } rcu_read_unlock(); } + rcu_unregister_thread(); + return NULL; } @@ -127,6 +147,8 @@ static void *untagged_iteration_fn(void *arg) */ static void *remove_entries_fn(void *arg) { + rcu_register_thread(); + while (!test_complete) { int pgoff; @@ -137,6 +159,8 @@ static void *remove_entries_fn(void *arg) pthread_mutex_unlock(&tree_lock); } + rcu_unregister_thread(); + return NULL; } -- cgit v1.2.3 From cfa40bcfd6fed7010b1633bf127ed8571d3b607e Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 14 Dec 2016 15:08:14 -0800 Subject: radix tree test suite: benchmark for iterator This adds simple benchmark for iterator similar to one I've used for commit 78c1d78488a3 ("radix-tree: introduce bit-optimized iterator") Building with make BENCHMARK=1 set radix tree order to 6, this allows to get performance comparable to in kernel performance. Link: http://lkml.kernel.org/r/1480369871-5271-43-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Konstantin Khlebnikov Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/Makefile | 6 +- tools/testing/radix-tree/benchmark.c | 98 +++++++++++++++++++++++++++++++++ tools/testing/radix-tree/linux/kernel.h | 4 ++ tools/testing/radix-tree/main.c | 2 + tools/testing/radix-tree/test.h | 1 + 5 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 tools/testing/radix-tree/benchmark.c diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 3c338dc3b162..08283a841066 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -4,7 +4,11 @@ LDFLAGS += -lpthread -lurcu TARGETS = main OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ regression1.o regression2.o regression3.o multiorder.o \ - iteration_check.o + iteration_check.o benchmark.o + +ifdef BENCHMARK + CFLAGS += -DBENCHMARK=1 +endif targets: $(TARGETS) diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c new file mode 100644 index 000000000000..215ca86c7605 --- /dev/null +++ b/tools/testing/radix-tree/benchmark.c @@ -0,0 +1,98 @@ +/* + * benchmark.c: + * Author: Konstantin Khlebnikov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include +#include +#include +#include +#include "test.h" + +#define NSEC_PER_SEC 1000000000L + +static long long benchmark_iter(struct radix_tree_root *root, bool tagged) +{ + volatile unsigned long sink = 0; + struct radix_tree_iter iter; + struct timespec start, finish; + long long nsec; + int l, loops = 1; + void **slot; + +#ifdef BENCHMARK +again: +#endif + clock_gettime(CLOCK_MONOTONIC, &start); + for (l = 0; l < loops; l++) { + if (tagged) { + radix_tree_for_each_tagged(slot, root, &iter, 0, 0) + sink ^= (unsigned long)slot; + } else { + radix_tree_for_each_slot(slot, root, &iter, 0) + sink ^= (unsigned long)slot; + } + } + clock_gettime(CLOCK_MONOTONIC, &finish); + + nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC + + (finish.tv_nsec - start.tv_nsec); + +#ifdef BENCHMARK + if (loops == 1 && nsec * 5 < NSEC_PER_SEC) { + loops = NSEC_PER_SEC / nsec / 4 + 1; + goto again; + } +#endif + + nsec /= loops; + return nsec; +} + +static void benchmark_size(unsigned long size, unsigned long step, int order) +{ + RADIX_TREE(tree, GFP_KERNEL); + long long normal, tagged; + unsigned long index; + + for (index = 0 ; index < size ; index += step) { + item_insert_order(&tree, index, order); + radix_tree_tag_set(&tree, index, 0); + } + + tagged = benchmark_iter(&tree, true); + normal = benchmark_iter(&tree, false); + + printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", + size, step, order, tagged, normal); + + item_kill_tree(&tree); + rcu_barrier(); +} + +void benchmark(void) +{ + unsigned long size[] = {1 << 10, 1 << 20, 0}; + unsigned long step[] = {1, 2, 7, 15, 63, 64, 65, + 128, 256, 512, 12345, 0}; + int c, s; + + printf("starting benchmarks\n"); + printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); + + for (c = 0; size[c]; c++) + for (s = 0; step[s]; s++) + 
benchmark_size(size[c], step[s], 0); + + for (c = 0; size[c]; c++) + for (s = 0; step[s]; s++) + benchmark_size(size[c], step[s] << 9, 9); +} diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index be98a47b4e1b..dbe4b92806b8 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h @@ -10,7 +10,11 @@ #include "../../include/linux/compiler.h" #include "../../../include/linux/kconfig.h" +#ifdef BENCHMARK +#define RADIX_TREE_MAP_SHIFT 6 +#else #define RADIX_TREE_MAP_SHIFT 3 +#endif #ifndef NULL #define NULL 0 diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 2eb694994497..f1d1e3bd9464 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -352,6 +352,8 @@ int main(int argc, char **argv) /* Free any remaining preallocated nodes */ radix_tree_cpu_dead(0); + benchmark(); + sleep(1); printf("after sleep(1): %d allocated, preempt %d\n", nr_allocated, preempt_count); diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 5d2fad05b263..215ab77a56e3 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -28,6 +28,7 @@ void item_kill_tree(struct radix_tree_root *root); void tag_check(void); void multiorder_checks(void); void iteration_test(void); +void benchmark(void); struct item * item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); -- cgit v1.2.3 From af1c5cca9030f1bb935463ceb8274bfe82719128 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:17 -0800 Subject: radix tree test suite: use rcu_barrier Calling rcu_barrier() allows all of the rcu-freed memory to be actually returned to the pool, and allows nr_allocated to return to 0. As well as allowing diffs between runs to be more useful, it also lets us pinpoint leaks more effectively. Link: http://lkml.kernel.org/r/1480369871-5271-44-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/main.c | 12 ++++++++++-- tools/testing/radix-tree/tag_check.c | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index f1d1e3bd9464..76d9c95aa487 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -295,24 +295,31 @@ static void single_thread_tests(bool long_run) printf("starting single_thread_tests: %d allocated, preempt %d\n", nr_allocated, preempt_count); multiorder_checks(); + rcu_barrier(); printf("after multiorder_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); locate_check(); + rcu_barrier(); printf("after locate_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); tag_check(); + rcu_barrier(); printf("after tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); gang_check(); + rcu_barrier(); printf("after gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); add_and_check(); + rcu_barrier(); printf("after add_and_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); dynamic_height_check(); + rcu_barrier(); printf("after dynamic_height_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); big_gang_check(long_run); + rcu_barrier(); printf("after big_gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); for (i = 0; i < (long_run ? 2000 : 3); i++) { @@ -320,6 +327,7 @@ static void single_thread_tests(bool long_run) printf("%d ", i); fflush(stdout); } + rcu_barrier(); printf("after copy_tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); } @@ -354,8 +362,8 @@ int main(int argc, char **argv) benchmark(); - sleep(1); - printf("after sleep(1): %d allocated, preempt %d\n", + rcu_barrier(); + printf("after rcu_barrier: %d allocated, preempt %d\n", nr_allocated, preempt_count); rcu_unregister_thread(); diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index b0ac05741750..186f6e4914e4 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c @@ -51,6 +51,7 @@ void simple_checks(void) verify_tag_consistency(&tree, 1); printf("before item_kill_tree: %d allocated\n", nr_allocated); item_kill_tree(&tree); + rcu_barrier(); printf("after item_kill_tree: %d allocated\n", nr_allocated); } @@ -331,12 +332,16 @@ void tag_check(void) single_check(); extend_checks(); contract_checks(); + rcu_barrier(); printf("after extend_checks: %d allocated\n", nr_allocated); __leak_check(); leak_check(); + rcu_barrier(); printf("after leak_check: %d allocated\n", nr_allocated); simple_checks(); + rcu_barrier(); printf("after simple_checks: %d allocated\n", nr_allocated); thrash_tags(); + rcu_barrier(); printf("after thrash_tags: %d allocated\n", nr_allocated); } -- cgit v1.2.3 From 3ad75f8a1d9b047989059c67afc38d57161270e9 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:20 -0800 Subject: radix tree test suite: handle exceptional entries item_kill_tree() assumes that everything in the tree is a pointer to a struct item, which is annoying when testing the behaviour of exceptional entries. Fix it to delete exceptional entries on the assumption they don't need to be freed. Link: http://lkml.kernel.org/r/1480369871-5271-45-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
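For reference, an exceptional entry is any value with bit 1 set, so it is not a dereferenceable item pointer and must not be passed to free(). A tiny sketch of what the fixed item_kill_tree() has to cope with (0x10 is an arbitrary payload):

        void *entry = (void *)(0x10 | RADIX_TREE_EXCEPTIONAL_ENTRY);

        radix_tree_insert(&tree, 5, entry);
        assert(radix_tree_exceptional_entry(entry));    /* true: bit 1 set */
        item_kill_tree(&tree);                  /* deletes it, never frees it */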
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/test.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index a6e8099eaf4f..6f8dafc797ce 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -200,9 +200,16 @@ void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag) void item_kill_tree(struct radix_tree_root *root) { + struct radix_tree_iter iter; + void **slot; struct item *items[32]; int nfound; + radix_tree_for_each_slot(slot, root, &iter, 0) { + if (radix_tree_exceptional_entry(*slot)) + radix_tree_delete(root, iter.index); + } + while ((nfound = radix_tree_gang_lookup(root, (void **)items, 0, 32))) { int i; -- cgit v1.2.3 From 101d9607fffefdfc9e3922f0ac9061a61edda1b0 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:23 -0800 Subject: radix tree test suite: record order in each item This probably doubles the size of each item allocated by the test suite but it lets us check a few more things, and may be needed for upcoming API changes that require the caller pass in the order of the entry. Link: http://lkml.kernel.org/r/1480369871-5271-46-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/multiorder.c | 2 +- tools/testing/radix-tree/test.c | 29 +++++++++++++++++++---------- tools/testing/radix-tree/test.h | 6 +++--- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index d1be94667a30..8d5865c95664 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -125,7 +125,7 @@ static void multiorder_check(unsigned long index, int order) unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); void **slot; - struct item *item2 = item_create(min); + struct item *item2 = item_create(min, order); RADIX_TREE(tree, GFP_KERNEL); printf("Multiorder index %ld, order %d\n", index, order); diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 6f8dafc797ce..0de548939a2e 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -24,21 +24,29 @@ int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag) return radix_tree_tag_get(root, index, tag); } -int __item_insert(struct radix_tree_root *root, struct item *item, - unsigned order) +int __item_insert(struct radix_tree_root *root, struct item *item) { - return __radix_tree_insert(root, item->index, order, item); + return __radix_tree_insert(root, item->index, item->order, item); } int item_insert(struct radix_tree_root *root, unsigned long index) { - return __item_insert(root, item_create(index), 0); + return __item_insert(root, item_create(index, 0)); } int item_insert_order(struct radix_tree_root *root, unsigned long index, unsigned order) { - return __item_insert(root, item_create(index), order); + return __item_insert(root, item_create(index, order)); +} + +void item_sanity(struct item *item, unsigned long index) +{ + unsigned long mask; + assert(!radix_tree_is_internal_node(item)); + assert(item->order < BITS_PER_LONG); + mask = (1UL << item->order) - 1; + 
assert((item->index | mask) == (index | mask)); } int item_delete(struct radix_tree_root *root, unsigned long index) @@ -46,18 +54,19 @@ int item_delete(struct radix_tree_root *root, unsigned long index) struct item *item = radix_tree_delete(root, index); if (item) { - assert(item->index == index); + item_sanity(item, index); free(item); return 1; } return 0; } -struct item *item_create(unsigned long index) +struct item *item_create(unsigned long index, unsigned int order) { struct item *ret = malloc(sizeof(*ret)); ret->index = index; + ret->order = order; return ret; } @@ -66,8 +75,8 @@ void item_check_present(struct radix_tree_root *root, unsigned long index) struct item *item; item = radix_tree_lookup(root, index); - assert(item != 0); - assert(item->index == index); + assert(item != NULL); + item_sanity(item, index); } struct item *item_lookup(struct radix_tree_root *root, unsigned long index) @@ -80,7 +89,7 @@ void item_check_absent(struct radix_tree_root *root, unsigned long index) struct item *item; item = radix_tree_lookup(root, index); - assert(item == 0); + assert(item == NULL); } /* diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 215ab77a56e3..423c528aaee9 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -5,11 +5,11 @@ struct item { unsigned long index; + unsigned int order; }; -struct item *item_create(unsigned long index); -int __item_insert(struct radix_tree_root *root, struct item *item, - unsigned order); +struct item *item_create(unsigned long index, unsigned int order); +int __item_insert(struct radix_tree_root *root, struct item *item); int item_insert(struct radix_tree_root *root, unsigned long index); int item_insert_order(struct radix_tree_root *root, unsigned long index, unsigned order); -- cgit v1.2.3 From b328daf3b7130098b105c18bdae694ddaad5b6e3 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:26 -0800 Subject: tools: add more bitmap functions I need the following functions for the radix tree: bitmap_fill bitmap_empty bitmap_full Copy the implementations from include/linux/bitmap.h Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/include/linux/bitmap.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h index 43c1c5021e4b..eef41d500e9e 100644 --- a/tools/include/linux/bitmap.h +++ b/tools/include/linux/bitmap.h @@ -35,6 +35,32 @@ static inline void bitmap_zero(unsigned long *dst, int nbits) } } +static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) +{ + unsigned int nlongs = BITS_TO_LONGS(nbits); + if (!small_const_nbits(nbits)) { + unsigned int len = (nlongs - 1) * sizeof(unsigned long); + memset(dst, 0xff, len); + } + dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); +} + +static inline int bitmap_empty(const unsigned long *src, unsigned nbits) +{ + if (small_const_nbits(nbits)) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_bit(src, nbits) == nbits; +} + +static inline int bitmap_full(const unsigned long *src, unsigned int nbits) +{ + if (small_const_nbits(nbits)) + return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + + return find_first_zero_bit(src, nbits) == nbits; +} + static inline int bitmap_weight(const unsigned long *src, int nbits) { if (small_const_nbits(nbits)) -- cgit v1.2.3 From 0629573e6bbd60f20ed2d7a91da1214a6274e751 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:29 -0800 Subject: radix tree test suite: use common find-bit code Remove the old find_next_bit code in favour of linking in the find_bit code from tools/lib. Link: http://lkml.kernel.org/r/1480369871-5271-48-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/Makefile | 7 ++- tools/testing/radix-tree/find_next_bit.c | 57 ---------------------- tools/testing/radix-tree/linux/bitops.h | 40 +++++++++------ tools/testing/radix-tree/linux/bitops/non-atomic.h | 13 +++-- tools/testing/radix-tree/linux/kernel.h | 11 +++++ 5 files changed, 48 insertions(+), 80 deletions(-) delete mode 100644 tools/testing/radix-tree/find_next_bit.c diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 08283a841066..3635e4d3eca7 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -18,7 +18,12 @@ main: $(OFILES) clean: $(RM) -f $(TARGETS) *.o radix-tree.c -$(OFILES): *.h */*.h ../../../include/linux/radix-tree.h ../../include/linux/*.h +find_next_bit.o: ../../lib/find_bit.c + $(CC) $(CFLAGS) -c -o $@ $< + +$(OFILES): *.h */*.h \ + ../../include/linux/*.h \ + ../../../include/linux/radix-tree.h radix-tree.c: ../../../lib/radix-tree.c sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ diff --git a/tools/testing/radix-tree/find_next_bit.c b/tools/testing/radix-tree/find_next_bit.c deleted file mode 100644 index d1c2178bb2d4..000000000000 --- a/tools/testing/radix-tree/find_next_bit.c +++ /dev/null @@ -1,57 +0,0 @@ -/* find_next_bit.c: fallback find next bit implementation - * - * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include - -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) - -/* - * Find the next set bit in a memory region. - */ -unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - const unsigned long *p = addr + BITOP_WORD(offset); - unsigned long result = offset & ~(BITS_PER_LONG-1); - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset %= BITS_PER_LONG; - if (offset) { - tmp = *(p++); - tmp &= (~0UL << offset); - if (size < BITS_PER_LONG) - goto found_first; - if (tmp) - goto found_middle; - size -= BITS_PER_LONG; - result += BITS_PER_LONG; - } - while (size & ~(BITS_PER_LONG-1)) { - if ((tmp = *(p++))) - goto found_middle; - result += BITS_PER_LONG; - size -= BITS_PER_LONG; - } - if (!size) - return result; - tmp = *p; - -found_first: - tmp &= (~0UL >> (BITS_PER_LONG - size)); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. 
*/ -found_middle: - return result + __ffs(tmp); -} diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h index 71d58427ab60..a13e9bc76eec 100644 --- a/tools/testing/radix-tree/linux/bitops.h +++ b/tools/testing/radix-tree/linux/bitops.h @@ -2,9 +2,14 @@ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #include +#include +#include +#include -#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) /** * __set_bit - Set a bit in memory @@ -17,16 +22,16 @@ */ static inline void __set_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p |= mask; } static inline void __clear_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; } @@ -42,8 +47,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) */ static inline void __change_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p ^= mask; } @@ -59,8 +64,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) */ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old | mask; @@ -78,8 +83,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old & ~mask; @@ -90,8 +95,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); unsigned long old = *p; *p = old ^ mask; @@ -105,7 +110,7 @@ static inline int __test_and_change_bit(int nr, */ static inline int test_bit(int nr, const volatile unsigned long *addr) { - return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } /** @@ -147,4 +152,9 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +static inline unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? 
hweight32(w) : hweight64(w); +} + #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h index 46a825cf2ae1..6a1bcb9d2c4a 100644 --- a/tools/testing/radix-tree/linux/bitops/non-atomic.h +++ b/tools/testing/radix-tree/linux/bitops/non-atomic.h @@ -3,7 +3,6 @@ #include -#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) /** @@ -17,7 +16,7 @@ */ static inline void __set_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); *p |= mask; @@ -25,7 +24,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) static inline void __clear_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); *p &= ~mask; @@ -42,7 +41,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) */ static inline void __change_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); *p ^= mask; @@ -59,7 +58,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) */ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long old = *p; @@ -78,7 +77,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long old = *p; @@ -90,7 +89,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { - unsigned long mask = BITOP_MASK(nr); + unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long old = *p; diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index dbe4b92806b8..23e77f5673ce 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h @@ -47,4 +47,15 @@ static inline int in_interrupt(void) { return 0; } + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + #endif /* _KERNEL_H */ -- cgit v1.2.3 From 91b9677c4c1242a3d0afd76d0e91f43808243b92 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:31 -0800 Subject: radix-tree: fix typo Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 2e8c6f7aa56e..60c10361cbfa 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1110,7 +1110,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. * * This condition also used by radix_tree_next_slot() to stop - * contiguous iterating, and forbid swithing to the next chunk. + * contiguous iterating, and forbid switching to the next chunk. */ index = iter->next_index; if (!index && iter->index) -- cgit v1.2.3 From 91d9c05ac6c788531136888d31ef18c6a0ec160f Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:34 -0800 Subject: radix-tree: move rcu_head into a union with private_list I want to be able to reference node->parent after freeing node. Currently node->parent is in a union with rcu_head, so it is overwritten when the node is put on the RCU list. We know that private_list is not referenced after the node is freed, so it is safe for these two members to share space. Link: http://lkml.kernel.org/r/1480369871-5271-50-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 14 ++++---------- lib/radix-tree.c | 1 + 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 744486057e9e..d04073ac3a9f 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -85,18 +85,12 @@ struct radix_tree_node { unsigned char offset; /* Slot offset in parent */ unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ + struct radix_tree_node *parent; /* Used when ascending tree */ + void *private_data; /* For tree user */ union { - struct { - /* Used when ascending tree */ - struct radix_tree_node *parent; - /* For tree user */ - void *private_data; - }; - /* Used when freeing node */ - struct rcu_head rcu_head; + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ }; - /* For tree user */ - struct list_head private_list; void __rcu *slots[RADIX_TREE_MAP_SIZE]; unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 60c10361cbfa..8c5911eae5e2 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -325,6 +325,7 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) tag_clear(node, i, 0); node->slots[0] = NULL; + INIT_LIST_HEAD(&node->private_list); kmem_cache_free(radix_tree_node_cachep, node); } -- cgit v1.2.3 From 9498d2bb34b0866829c313569df35e77a83f12eb Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:37 -0800 Subject: radix-tree: create node_tag_set() Similar to node_tag_clear(), factor node_tag_set() out of radix_tree_range_tag_if_tagged(). Link: http://lkml.kernel.org/r/1480369871-5271-51-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8c5911eae5e2..a90a4371deb8 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -991,6 +991,22 @@ static void node_tag_clear(struct radix_tree_root *root, root_tag_clear(root, tag); } +static void node_tag_set(struct radix_tree_root *root, + struct radix_tree_node *node, + unsigned int tag, unsigned int offset) +{ + while (node) { + if (tag_get(node, tag, offset)) + return; + tag_set(node, tag, offset); + offset = node->offset; + node = node->parent; + } + + if (!root_tag_get(root, tag)) + root_tag_set(root, tag); +} + /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root @@ -1229,7 +1245,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, unsigned long nr_to_tag, unsigned int iftag, unsigned int settag) { - struct radix_tree_node *parent, *node, *child; + struct radix_tree_node *node, *child; unsigned long maxindex; unsigned long tagged = 0; unsigned long index = *first_indexp; @@ -1264,22 +1280,8 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, continue; } - /* tag the leaf */ tagged++; - tag_set(node, settag, offset); - - /* walk back up the path tagging interior nodes */ - parent = node; - for (;;) { - offset = parent->offset; - parent = parent->parent; - if (!parent) - break; - /* stop if we find a node with the tag already set */ - if (tag_get(parent, settag, offset)) - break; - tag_set(parent, settag, offset); - } + node_tag_set(root, node, settag, offset); next: /* Go to next entry in node */ index = ((index >> node->shift) + 1) << node->shift; @@ -1301,12 +1303,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, if (tagged >= nr_to_tag) break; } - /* - * We need not to tag the root tag if there is no tag which is set with - * settag within the range from *first_indexp to last_index. - */ - if (tagged > 0) - root_tag_set(root, settag); + *first_indexp = index; return tagged; -- cgit v1.2.3 From bc412fca6edc25bbbe28b6449512e15ebb1573ae Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:40 -0800 Subject: radix-tree: make radix_tree_find_next_bit more useful Since this function is specialised to the radix tree, pass in the node and tag to calculate the address of the bitmap in radix_tree_find_next_bit() instead of the caller. Likewise, there is no need to pass in the size of the bitmap. Link: http://lkml.kernel.org/r/1480369871-5271-52-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index a90a4371deb8..368052ac468a 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -191,13 +191,12 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) * Returns next bit offset, or size if nothing found. 
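 *
 * Editorial sketch, not part of the patch: with the new calling
 * convention below, if only bit 5 of node->tags[tag] is set, then
 * radix_tree_find_next_bit(node, tag, 0) returns 5, while
 * radix_tree_find_next_bit(node, tag, 6) returns RADIX_TREE_MAP_SIZE,
 * meaning no further bit was found.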
*/ static __always_inline unsigned long -radix_tree_find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset) +radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, + unsigned long offset) { - if (!__builtin_constant_p(size)) - return find_next_bit(addr, size, offset); + const unsigned long *addr = node->tags[tag]; - if (offset < size) { + if (offset < RADIX_TREE_MAP_SIZE) { unsigned long tmp; addr += offset / BITS_PER_LONG; @@ -205,14 +204,14 @@ radix_tree_find_next_bit(const unsigned long *addr, if (tmp) return __ffs(tmp) + offset; offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); - while (offset < size) { + while (offset < RADIX_TREE_MAP_SIZE) { tmp = *++addr; if (tmp) return __ffs(tmp) + offset; offset += BITS_PER_LONG; } } - return size; + return RADIX_TREE_MAP_SIZE; } #ifndef __KERNEL__ @@ -1160,9 +1159,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, return NULL; if (flags & RADIX_TREE_ITER_TAGGED) - offset = radix_tree_find_next_bit( - node->tags[tag], - RADIX_TREE_MAP_SIZE, + offset = radix_tree_find_next_bit(node, tag, offset + 1); else while (++offset < RADIX_TREE_MAP_SIZE) { -- cgit v1.2.3 From 218ed7503aee07c7758dcbdd782e8c1a25c9f1e9 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:43 -0800 Subject: radix-tree: improve dump output Print the indices of the entries as unsigned (instead of signed) integers and print the parent node of each entry to help navigate around larger trees where the layout is not quite so obvious. Print the indices covered by a node. Rearrange the order of fields printed so the indices and parents line up for each type of entry. Link: http://lkml.kernel.org/r/1480369871-5271-53-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 368052ac468a..dbf4e8c8e100 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -214,15 +214,29 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, return RADIX_TREE_MAP_SIZE; } +/* + * The maximum index which can be stored in a radix tree + */ +static inline unsigned long shift_maxindex(unsigned int shift) +{ + return (RADIX_TREE_MAP_SIZE << shift) - 1; +} + +static inline unsigned long node_maxindex(struct radix_tree_node *node) +{ + return shift_maxindex(node->shift); +} + #ifndef __KERNEL__ static void dump_node(struct radix_tree_node *node, unsigned long index) { unsigned long i; - pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d exceptional %d parent %p\n", - node, node->offset, + pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n", + node, node->offset, index, index | node_maxindex(node), + node->parent, node->tags[0][0], node->tags[1][0], node->tags[2][0], - node->shift, node->count, node->exceptional, node->parent); + node->shift, node->count, node->exceptional); for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { unsigned long first = index | (i << node->shift); @@ -230,14 +244,16 @@ static void dump_node(struct radix_tree_node *node, unsigned long index) void *entry = node->slots[i]; if (!entry) continue; - if (is_sibling_entry(node, entry)) { - pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n", - entry, i, - *(void **)entry_to_node(entry), - first, last); + if (entry == RADIX_TREE_RETRY) { + pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n", + i, first, last, node); } else if (!radix_tree_is_internal_node(entry)) { - pr_debug("radix entry %p offset %ld indices %ld-%ld\n", - entry, i, first, last); + pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n", + entry, i, first, last, node); + } else if (is_sibling_entry(node, entry)) { + pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n", + entry, i, first, last, node, + *(void **)entry_to_node(entry)); } else { dump_node(entry_to_node(entry), first); } @@ -455,19 +471,6 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } -/* - * The maximum index which can be stored in a radix tree - */ -static inline unsigned long shift_maxindex(unsigned int shift) -{ - return (RADIX_TREE_MAP_SIZE << shift) - 1; -} - -static inline unsigned long node_maxindex(struct radix_tree_node *node) -{ - return shift_maxindex(node->shift); -} - static unsigned radix_tree_load_root(struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { -- cgit v1.2.3 From b35df27a39f40e39fabf1b1e9569c7b24e1add6a Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:46 -0800 Subject: btrfs: fix race in btrfs_free_dummy_fs_info() We drop the lock which protects the radix tree, so we must call radix_tree_iter_next() in order to avoid a modification to the tree invalidating the iterator state. Link: http://lkml.kernel.org/r/1480369871-5271-54-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/tests/btrfs-tests.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index bf62ad919a95..73076a0ea6a9 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -162,6 +162,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) slot = radix_tree_iter_retry(&iter); continue; } + slot = radix_tree_iter_next(&iter); spin_unlock(&fs_info->buffer_lock); free_extent_buffer_stale(eb); spin_lock(&fs_info->buffer_lock); -- cgit v1.2.3 From 148deab223b23734069abcacb5c7118b0e7deadc Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:49 -0800 Subject: radix-tree: improve multiorder iterators This fixes several interlinked problems with the iterators in the presence of multiorder entries. 1. radix_tree_iter_next() would only advance by one slot, which would result in the iterators returning the same entry more than once if there were sibling entries. 2. radix_tree_next_slot() could return an internal pointer instead of a user pointer if a tagged multiorder entry was immediately followed by an entry of lower order. 3. radix_tree_next_slot() expanded to a lot more code than it used to when multiorder support was compiled in. And I wasn't comfortable with entry_to_node() being in a header file. Fixing radix_tree_iter_next() for the presence of sibling entries necessarily involves examining the contents of the radix tree, so we now need to pass 'slot' to radix_tree_iter_next(), and we need to change the calling convention so it is called *before* dropping the lock which protects the tree. Also rename it to radix_tree_iter_resume(), as some people thought it was necessary to call radix_tree_iter_next() each time around the loop. radix_tree_next_slot() becomes closer to how it looked before multiorder support was introduced. It only checks to see if the next entry in the chunk is a sibling entry or a pointer to a node; this should be rare enough that handling this case out of line is not a performance impact (and such impact is amortised by the fact that the entry we just processed was a multiorder entry). Also, radix_tree_next_slot() used to force a new chunk lookup for untagged entries, which is more expensive than the out of line sibling entry skipping. Link: http://lkml.kernel.org/r/1480369871-5271-55-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/tests/btrfs-tests.c | 2 +- include/linux/radix-tree.h | 71 +++++++-------- lib/radix-tree.c | 138 +++++++++++++++++++++++++---- mm/khugepaged.c | 7 +- mm/shmem.c | 6 +- tools/testing/radix-tree/iteration_check.c | 12 +-- tools/testing/radix-tree/multiorder.c | 28 ++++-- tools/testing/radix-tree/regression3.c | 8 +- tools/testing/radix-tree/test.h | 1 + 9 files changed, 188 insertions(+), 85 deletions(-) diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 73076a0ea6a9..00ee006a8aa2 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -162,7 +162,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) slot = radix_tree_iter_retry(&iter); continue; } - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); spin_unlock(&fs_info->buffer_lock); free_extent_buffer_stale(eb); spin_lock(&fs_info->buffer_lock); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index d04073ac3a9f..289d007d487b 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -403,20 +403,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) } /** - * radix_tree_iter_next - resume iterating when the chunk may be invalid - * @iter: iterator state + * radix_tree_iter_resume - resume iterating when the chunk may be invalid + * @slot: pointer to current slot + * @iter: iterator state + * Returns: New slot pointer * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function - * to continue the iteration from the next index. + * before releasing the lock to continue the iteration from the next index. */ -static inline __must_check -void **radix_tree_iter_next(struct radix_tree_iter *iter) -{ - iter->next_index = __radix_tree_iter_add(iter, 1); - iter->tags = 0; - return NULL; -} +void **__must_check radix_tree_iter_resume(void **slot, + struct radix_tree_iter *iter); /** * radix_tree_chunk_size - get current chunk size @@ -430,10 +427,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) return (iter->next_index - iter->index) >> iter_shift(iter); } -static inline struct radix_tree_node *entry_to_node(void *ptr) +#ifdef CONFIG_RADIX_TREE_MULTIORDER +void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, + unsigned flags); +#else +/* Can't happen without sibling entries, but the compiler can't tell that */ +static inline void ** __radix_tree_next_slot(void **slot, + struct radix_tree_iter *iter, unsigned flags) { - return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); + return slot; } +#endif /** * radix_tree_next_slot - find next slot in chunk @@ -447,7 +451,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr) * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this - * function. These cases result from the use of radix_tree_iter_next() or + * function. These cases result from the use of radix_tree_iter_resume() or * radix_tree_iter_retry(). 
In these cases we don't end up dereferencing * 'slot' because either: * a) we are doing tagged iteration and iter->tags has been set to 0, or @@ -458,51 +462,31 @@ static __always_inline void ** radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { - void *canon = slot; - iter->tags >>= 1; if (unlikely(!iter->tags)) return NULL; - while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && - radix_tree_is_internal_node(slot[1])) { - if (entry_to_node(slot[1]) == canon) { - iter->tags >>= 1; - iter->index = __radix_tree_iter_add(iter, 1); - slot++; - continue; - } - iter->next_index = __radix_tree_iter_add(iter, 1); - return NULL; - } if (likely(iter->tags & 1ul)) { iter->index = __radix_tree_iter_add(iter, 1); - return slot + 1; + slot++; + goto found; } if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); - iter->tags >>= offset; - iter->index = __radix_tree_iter_add(iter, offset + 1); - return slot + offset + 1; + iter->tags >>= offset++; + iter->index = __radix_tree_iter_add(iter, offset); + slot += offset; + goto found; } } else { long count = radix_tree_chunk_size(iter); - void *canon = slot; while (--count > 0) { slot++; iter->index = __radix_tree_iter_add(iter, 1); - if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) && - radix_tree_is_internal_node(*slot)) { - if (entry_to_node(*slot) == canon) - continue; - iter->next_index = iter->index; - break; - } - if (likely(*slot)) - return slot; + goto found; if (flags & RADIX_TREE_ITER_CONTIG) { /* forbid switching to the next chunk */ iter->next_index = 0; @@ -511,6 +495,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) } } return NULL; + + found: + if (unlikely(radix_tree_is_internal_node(*slot))) + return __radix_tree_next_slot(slot, iter, flags); + return slot; } /** @@ -561,6 +550,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) slot || (slot = radix_tree_next_chunk(root, iter, \ RADIX_TREE_ITER_TAGGED | tag)) ; \ slot = radix_tree_next_slot(slot, iter, \ - RADIX_TREE_ITER_TAGGED)) + RADIX_TREE_ITER_TAGGED | tag)) #endif /* _LINUX_RADIX_TREE_H */ diff --git a/lib/radix-tree.c b/lib/radix-tree.c index dbf4e8c8e100..0a7ac0fb4a65 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -69,6 +69,11 @@ struct radix_tree_preload { }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); +} + static inline void *node_to_entry(void *ptr) { return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); @@ -1104,6 +1109,120 @@ static inline void __set_iter_shift(struct radix_tree_iter *iter, #endif } +/* Construct iter->tags bit-mask from node->tags[tag] array */ +static void set_iter_tags(struct radix_tree_iter *iter, + struct radix_tree_node *node, unsigned offset, + unsigned tag) +{ + unsigned tag_long = offset / BITS_PER_LONG; + unsigned tag_bit = offset % BITS_PER_LONG; + + iter->tags = node->tags[tag][tag_long] >> tag_bit; + + /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ + if (tag_long < RADIX_TREE_TAG_LONGS - 1) { + /* Pick tags from next element */ + if (tag_bit) + iter->tags |= node->tags[tag][tag_long + 1] << + (BITS_PER_LONG - tag_bit); + /* Clip chunk size, here only BITS_PER_LONG tags */ + iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); + } +} + +#ifdef CONFIG_RADIX_TREE_MULTIORDER +static void 
**skip_siblings(struct radix_tree_node **nodep, + void **slot, struct radix_tree_iter *iter) +{ + void *sib = node_to_entry(slot - 1); + + while (iter->index < iter->next_index) { + *nodep = rcu_dereference_raw(*slot); + if (*nodep && *nodep != sib) + return slot; + slot++; + iter->index = __radix_tree_iter_add(iter, 1); + iter->tags >>= 1; + } + + *nodep = NULL; + return NULL; +} + +void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, + unsigned flags) +{ + unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; + struct radix_tree_node *node = rcu_dereference_raw(*slot); + + slot = skip_siblings(&node, slot, iter); + + while (radix_tree_is_internal_node(node)) { + unsigned offset; + unsigned long next_index; + + if (node == RADIX_TREE_RETRY) + return slot; + node = entry_to_node(node); + iter->shift = node->shift; + + if (flags & RADIX_TREE_ITER_TAGGED) { + offset = radix_tree_find_next_bit(node, tag, 0); + if (offset == RADIX_TREE_MAP_SIZE) + return NULL; + slot = &node->slots[offset]; + iter->index = __radix_tree_iter_add(iter, offset); + set_iter_tags(iter, node, offset, tag); + node = rcu_dereference_raw(*slot); + } else { + offset = 0; + slot = &node->slots[0]; + for (;;) { + node = rcu_dereference_raw(*slot); + if (node) + break; + slot++; + offset++; + if (offset == RADIX_TREE_MAP_SIZE) + return NULL; + } + iter->index = __radix_tree_iter_add(iter, offset); + } + if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0)) + goto none; + next_index = (iter->index | shift_maxindex(iter->shift)) + 1; + if (next_index < iter->next_index) + iter->next_index = next_index; + } + + return slot; + none: + iter->next_index = 0; + return NULL; +} +EXPORT_SYMBOL(__radix_tree_next_slot); +#else +static void **skip_siblings(struct radix_tree_node **nodep, + void **slot, struct radix_tree_iter *iter) +{ + return slot; +} +#endif + +void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) +{ + struct radix_tree_node *node; + + slot++; + iter->index = __radix_tree_iter_add(iter, 1); + node = rcu_dereference_raw(*slot); + skip_siblings(&node, slot, iter); + iter->next_index = iter->index; + iter->tags = 0; + return NULL; +} +EXPORT_SYMBOL(radix_tree_iter_resume); + /** * radix_tree_next_chunk - find next chunk of slots for iteration * @@ -1191,23 +1310,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, iter->next_index = (index | node_maxindex(node)) + 1; __set_iter_shift(iter, node->shift); - /* Construct iter->tags bit-mask from node->tags[tag] array */ - if (flags & RADIX_TREE_ITER_TAGGED) { - unsigned tag_long, tag_bit; - - tag_long = offset / BITS_PER_LONG; - tag_bit = offset % BITS_PER_LONG; - iter->tags = node->tags[tag][tag_long] >> tag_bit; - /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ - if (tag_long < RADIX_TREE_TAG_LONGS - 1) { - /* Pick tags from next element */ - if (tag_bit) - iter->tags |= node->tags[tag][tag_long + 1] << - (BITS_PER_LONG - tag_bit); - /* Clip chunk size, here only BITS_PER_LONG tags */ - iter->next_index = index + BITS_PER_LONG; - } - } + if (flags & RADIX_TREE_ITER_TAGGED) + set_iter_tags(iter, node, offset, tag); return node->slots + offset; } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 7434a63cac94..e32389a97030 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1446,7 +1446,7 @@ static void collapse_shmem(struct mm_struct *mm, radix_tree_replace_slot(&mapping->page_tree, slot, new_page + (index % HPAGE_PMD_NR)); - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); index++; 
continue; out_lru: @@ -1546,7 +1546,6 @@ tree_unlocked: /* Put holes back where they were */ radix_tree_delete(&mapping->page_tree, iter.index); - slot = radix_tree_iter_next(&iter); continue; } @@ -1557,11 +1556,11 @@ tree_unlocked: page_ref_unfreeze(page, 2); radix_tree_replace_slot(&mapping->page_tree, slot, page); + slot = radix_tree_iter_resume(slot, &iter); spin_unlock_irq(&mapping->tree_lock); putback_lru_page(page); unlock_page(page); spin_lock_irq(&mapping->tree_lock); - slot = radix_tree_iter_next(&iter); } VM_BUG_ON(nr_none); spin_unlock_irq(&mapping->tree_lock); @@ -1641,8 +1640,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm, present++; if (need_resched()) { + slot = radix_tree_iter_resume(slot, &iter); cond_resched_rcu(); - slot = radix_tree_iter_next(&iter); } } rcu_read_unlock(); diff --git a/mm/shmem.c b/mm/shmem.c index abd7403aba41..be11c6dd414a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -661,8 +661,8 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, swapped++; if (need_resched()) { + slot = radix_tree_iter_resume(slot, &iter); cond_resched_rcu(); - slot = radix_tree_iter_next(&iter); } } @@ -2447,8 +2447,8 @@ static void shmem_tag_pins(struct address_space *mapping) } if (need_resched()) { + slot = radix_tree_iter_resume(slot, &iter); cond_resched_rcu(); - slot = radix_tree_iter_next(&iter); } } rcu_read_unlock(); @@ -2517,8 +2517,8 @@ static int shmem_wait_for_pins(struct address_space *mapping) spin_unlock_irq(&mapping->tree_lock); continue_resched: if (need_resched()) { + slot = radix_tree_iter_resume(slot, &iter); cond_resched_rcu(); - slot = radix_tree_iter_next(&iter); } } rcu_read_unlock(); diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index df71cb841385..f328a66899b4 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -48,8 +48,8 @@ static void *add_entries_fn(void *arg) /* * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find * things that have been removed and randomly resetting our iteration to the - * next chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and - * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a + * next chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and + * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a * NULL 'slot' variable. */ static void *tagged_iteration_fn(void *arg) @@ -79,7 +79,7 @@ static void *tagged_iteration_fn(void *arg) } if (rand_r(&seeds[0]) % 50 == 0) { - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); rcu_read_unlock(); rcu_barrier(); rcu_read_lock(); @@ -96,8 +96,8 @@ static void *tagged_iteration_fn(void *arg) /* * Iterate over the entries, doing a radix_tree_iter_retry() as we find things * that have been removed and randomly resetting our iteration to the next - * chunk with radix_tree_iter_next(). Both radix_tree_iter_retry() and - * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a + * chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and + * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a * NULL 'slot' variable. 
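 *
 * Editorial sketch, not part of the test: after
 *
 *	slot = radix_tree_iter_resume(slot, &iter);
 *
 * 'slot' is NULL and the cached chunk is exhausted (iter->tags is 0
 * for tagged walks, and next_index equals index otherwise), so the
 * next radix_tree_next_slot() call returns NULL without dereferencing
 * 'slot', and the for_each macro falls back to radix_tree_next_chunk()
 * to look up a fresh chunk.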
*/ static void *untagged_iteration_fn(void *arg) @@ -127,7 +127,7 @@ static void *untagged_iteration_fn(void *arg) } if (rand_r(&seeds[1]) % 50 == 0) { - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); rcu_read_unlock(); rcu_barrier(); rcu_read_lock(); diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 8d5865c95664..b9be8856d652 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -231,11 +231,14 @@ void multiorder_iteration(void) radix_tree_for_each_slot(slot, &tree, &iter, j) { int height = order[i] / RADIX_TREE_MAP_SHIFT; int shift = height * RADIX_TREE_MAP_SHIFT; - int mask = (1 << order[i]) - 1; + unsigned long mask = (1UL << order[i]) - 1; + struct item *item = *slot; - assert(iter.index >= (index[i] &~ mask)); - assert(iter.index <= (index[i] | mask)); + assert((iter.index | mask) == (index[i] | mask)); assert(iter.shift == shift); + assert(!radix_tree_is_internal_node(item)); + assert((item->index | mask) == (index[i] | mask)); + assert(item->order == order[i]); i++; } } @@ -269,7 +272,7 @@ void multiorder_tagged_iteration(void) assert(radix_tree_tag_set(&tree, tag_index[i], 1)); for (j = 0; j < 256; j++) { - int mask, k; + int k; for (i = 0; i < TAG_ENTRIES; i++) { for (k = i; index[k] < tag_index[i]; k++) @@ -279,12 +282,16 @@ void multiorder_tagged_iteration(void) } radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) { + unsigned long mask; + struct item *item = *slot; for (k = i; index[k] < tag_index[i]; k++) ; - mask = (1 << order[k]) - 1; + mask = (1UL << order[k]) - 1; - assert(iter.index >= (tag_index[i] &~ mask)); - assert(iter.index <= (tag_index[i] | mask)); + assert((iter.index | mask) == (tag_index[i] | mask)); + assert(!radix_tree_is_internal_node(item)); + assert((item->index | mask) == (tag_index[i] | mask)); + assert(item->order == order[k]); i++; } } @@ -303,12 +310,15 @@ void multiorder_tagged_iteration(void) } radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) { + struct item *item = *slot; for (k = i; index[k] < tag_index[i]; k++) ; mask = (1 << order[k]) - 1; - assert(iter.index >= (tag_index[i] &~ mask)); - assert(iter.index <= (tag_index[i] | mask)); + assert((iter.index | mask) == (tag_index[i] | mask)); + assert(!radix_tree_is_internal_node(item)); + assert((item->index | mask) == (tag_index[i] | mask)); + assert(item->order == order[k]); i++; } } diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c index 1f06ed73d0a8..b594841fae85 100644 --- a/tools/testing/radix-tree/regression3.c +++ b/tools/testing/radix-tree/regression3.c @@ -5,7 +5,7 @@ * In following radix_tree_next_slot current chunk size becomes zero. * This isn't checked and it tries to dereference null pointer in slot. * - * Helper radix_tree_iter_next reset slot to NULL and next_index to index + 1, + * Helper radix_tree_iter_resume reset slot to NULL and next_index to index + 1, * for tagger iteraction it also must reset cached tags in iterator to abort * next radix_tree_next_slot and go to slow-path into radix_tree_next_chunk. 
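 *
 * Editorial note, not part of the test: each loop below exercises that
 * hand-off once, by calling
 *
 *	slot = radix_tree_iter_resume(slot, &iter);
 *
 * at index 0 and relying on the subsequent radix_tree_next_slot() call
 * to return NULL instead of dereferencing the stale slot pointer.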
* @@ -88,7 +88,7 @@ void regression3_test(void) printf("slot %ld %p\n", iter.index, *slot); if (!iter.index) { printf("next at %ld\n", iter.index); - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); } } @@ -96,7 +96,7 @@ void regression3_test(void) printf("contig %ld %p\n", iter.index, *slot); if (!iter.index) { printf("next at %ld\n", iter.index); - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); } } @@ -106,7 +106,7 @@ void regression3_test(void) printf("tagged %ld %p\n", iter.index, *slot); if (!iter.index) { printf("next at %ld\n", iter.index); - slot = radix_tree_iter_next(&iter); + slot = radix_tree_iter_resume(slot, &iter); } } diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 423c528aaee9..617416ec3c5e 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -41,6 +41,7 @@ void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag); extern int nr_allocated; /* Normally private parts of lib/radix-tree.c */ +struct radix_tree_node *entry_to_node(void *ptr); void radix_tree_dump(struct radix_tree_root *root); int root_tag_get(struct radix_tree_root *root, unsigned int tag); unsigned long node_maxindex(struct radix_tree_node *); -- cgit v1.2.3 From 478922e2b0f41567e4a530771bfb3f693f857d45 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:52 -0800 Subject: radix-tree: delete radix_tree_locate_item() This rather complicated function can be better implemented as an iterator. It has only one caller, so move the functionality to the only place that needs it. Update the test suite to follow the same pattern. Link: http://lkml.kernel.org/r/1480369871-5271-56-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Acked-by: Konstantin Khlebnikov Tested-by: Kirill A. Shutemov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 1 - lib/radix-tree.c | 99 ----------------------------------------- mm/shmem.c | 26 ++++++++++- tools/testing/radix-tree/main.c | 8 ++-- tools/testing/radix-tree/test.c | 22 +++++++++ tools/testing/radix-tree/test.h | 2 + 6 files changed, 53 insertions(+), 105 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 289d007d487b..a13d3f7c6c65 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -296,7 +296,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, unsigned long nr_to_tag, unsigned int fromtag, unsigned int totag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); -unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); static inline void radix_tree_preload_end(void) { diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 0a7ac0fb4a65..5bdf1bdbca00 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1579,105 +1579,6 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, } EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); -#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP) -#include /* for cond_resched() */ - -struct locate_info { - unsigned long found_index; - bool stop; -}; - -/* - * This linear search is at present only useful to shmem_unuse_inode(). 
- */ -static unsigned long __locate(struct radix_tree_node *slot, void *item, - unsigned long index, struct locate_info *info) -{ - unsigned long i; - - do { - unsigned int shift = slot->shift; - - for (i = (index >> shift) & RADIX_TREE_MAP_MASK; - i < RADIX_TREE_MAP_SIZE; - i++, index += (1UL << shift)) { - struct radix_tree_node *node = - rcu_dereference_raw(slot->slots[i]); - if (node == RADIX_TREE_RETRY) - goto out; - if (!radix_tree_is_internal_node(node)) { - if (node == item) { - info->found_index = index; - info->stop = true; - goto out; - } - continue; - } - node = entry_to_node(node); - if (is_sibling_entry(slot, node)) - continue; - slot = node; - break; - } - } while (i < RADIX_TREE_MAP_SIZE); - -out: - if ((index == 0) && (i == RADIX_TREE_MAP_SIZE)) - info->stop = true; - return index; -} - -/** - * radix_tree_locate_item - search through radix tree for item - * @root: radix tree root - * @item: item to be found - * - * Returns index where item was found, or -1 if not found. - * Caller must hold no lock (since this time-consuming function needs - * to be preemptible), and must check afterwards if item is still there. - */ -unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) -{ - struct radix_tree_node *node; - unsigned long max_index; - unsigned long cur_index = 0; - struct locate_info info = { - .found_index = -1, - .stop = false, - }; - - do { - rcu_read_lock(); - node = rcu_dereference_raw(root->rnode); - if (!radix_tree_is_internal_node(node)) { - rcu_read_unlock(); - if (node == item) - info.found_index = 0; - break; - } - - node = entry_to_node(node); - - max_index = node_maxindex(node); - if (cur_index > max_index) { - rcu_read_unlock(); - break; - } - - cur_index = __locate(node, item, cur_index, &info); - rcu_read_unlock(); - cond_resched(); - } while (!info.stop && cur_index <= max_index); - - return info.found_index; -} -#else -unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) -{ - return -1; -} -#endif /* CONFIG_SHMEM && CONFIG_SWAP */ - /** * __radix_tree_delete_node - try to free node after clearing a slot * @root: radix tree root diff --git a/mm/shmem.c b/mm/shmem.c index be11c6dd414a..54287d443806 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1049,6 +1049,30 @@ static void shmem_evict_inode(struct inode *inode) clear_inode(inode); } +static unsigned long find_swap_entry(struct radix_tree_root *root, void *item) +{ + struct radix_tree_iter iter; + void **slot; + unsigned long found = -1; + unsigned int checked = 0; + + rcu_read_lock(); + radix_tree_for_each_slot(slot, root, &iter, 0) { + if (*slot == item) { + found = iter.index; + break; + } + checked++; + if ((checked % 4096) != 0) + continue; + slot = radix_tree_iter_resume(slot, &iter); + cond_resched_rcu(); + } + + rcu_read_unlock(); + return found; +} + /* * If swap found in inode, free it and move page from swapcache to filecache. 
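 *
 * Editorial sketch, not part of the patch, of how find_swap_entry()
 * above is used by shmem_unuse_inode() below:
 *
 *	radswap = swp_to_radix_entry(swap);
 *	index = find_swap_entry(&mapping->page_tree, radswap);
 *	if (index == -1)
 *		return -EAGAIN;
 *
 * The radix_tree_iter_resume() before cond_resched_rcu() keeps the
 * iterator usable across the point where other threads may modify the
 * tree.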
*/ @@ -1062,7 +1086,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, int error = 0; radswap = swp_to_radix_entry(swap); - index = radix_tree_locate_item(&mapping->page_tree, radswap); + index = find_swap_entry(&mapping->page_tree, radswap); if (index == -1) return -EAGAIN; /* tell shmem_unuse we found nothing */ diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 76d9c95aa487..a028dae8a043 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -239,7 +239,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index, item_insert_order(tree, index, order); item = item_lookup(tree, index); - index2 = radix_tree_locate_item(tree, item); + index2 = find_item(tree, item); if (index != index2) { printf("index %ld order %d inserted; found %ld\n", index, order, index2); @@ -273,17 +273,17 @@ static void locate_check(void) index += (1UL << order)) { __locate_check(&tree, index + offset, order); } - if (radix_tree_locate_item(&tree, &tree) != -1) + if (find_item(&tree, &tree) != -1) abort(); item_kill_tree(&tree); } } - if (radix_tree_locate_item(&tree, &tree) != -1) + if (find_item(&tree, &tree) != -1) abort(); __locate_check(&tree, -1, 0); - if (radix_tree_locate_item(&tree, &tree) != -1) + if (find_item(&tree, &tree) != -1) abort(); item_kill_tree(&tree); } diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 0de548939a2e..88bf57f7175e 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -151,6 +151,28 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start, assert(nfound == 0); } +/* Use the same pattern as find_swap_entry() in mm/shmem.c */ +unsigned long find_item(struct radix_tree_root *root, void *item) +{ + struct radix_tree_iter iter; + void **slot; + unsigned long found = -1; + unsigned long checked = 0; + + radix_tree_for_each_slot(slot, root, &iter, 0) { + if (*slot == item) { + found = iter.index; + break; + } + checked++; + if ((checked % 4) != 0) + continue; + slot = radix_tree_iter_resume(slot, &iter); + } + + return found; +} + static int verify_node(struct radix_tree_node *slot, unsigned int tag, int tagged) { diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 617416ec3c5e..3d9d1d30da22 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -25,6 +25,8 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start, unsigned long nr, int chunk); void item_kill_tree(struct radix_tree_root *root); +unsigned long find_item(struct radix_tree_root *, void *item); + void tag_check(void); void multiorder_checks(void); void iteration_test(void); -- cgit v1.2.3 From 268f42de718128cd0301293177e79c08c38e39a6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:55 -0800 Subject: radix-tree: delete radix_tree_range_tag_if_tagged() This is an exceptionally complicated function with just one caller (tag_pages_for_writeback). We devote a large portion of the runtime of the test suite to testing this one function which has one caller. By introducing the new function radix_tree_iter_tag_set(), we can eliminate all of the complexity while keeping the performance. The caller can now use a fairly standard radix_tree_for_each() loop, and it doesn't need to worry about tricksy things like 'start' wrapping. 
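In outline, a caller now looks like this (a minimal sketch of the new
pattern rather than the exact mm/page-writeback.c code; the lock, the
IFTAG/THENTAG tag names and the BATCH size are illustrative):

	struct radix_tree_iter iter;
	void **slot;
	unsigned long tagged = 0;

	spin_lock_irq(&tree_lock);
	radix_tree_for_each_tagged(slot, root, &iter, start, IFTAG) {
		if (iter.index > end)
			break;
		radix_tree_iter_tag_set(root, &iter, THENTAG);
		if ((++tagged % BATCH) != 0)
			continue;
		/* Invalidate the cached chunk before dropping the lock. */
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock_irq(&tree_lock);
		cond_resched();
		spin_lock_irq(&tree_lock);
	}
	spin_unlock_irq(&tree_lock);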
The test suite continues to spend a large amount of time investigating this function, but now it's testing the underlying primitives such as radix_tree_iter_resume() and the radix_tree_for_each_tagged() iterator which are also used by other parts of the kernel. Link: http://lkml.kernel.org/r/1480369871-5271-57-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 74 ++++++++++----------- lib/radix-tree.c | 117 ++++++--------------------------- mm/page-writeback.c | 28 +++++--- tools/testing/radix-tree/main.c | 12 ++-- tools/testing/radix-tree/multiorder.c | 13 ++-- tools/testing/radix-tree/regression2.c | 3 +- tools/testing/radix-tree/tag_check.c | 4 +- tools/testing/radix-tree/test.c | 34 ++++++++++ tools/testing/radix-tree/test.h | 3 + 9 files changed, 125 insertions(+), 163 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index a13d3f7c6c65..7a8d2516c73a 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -120,6 +120,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root) return root->rnode == NULL; } +/** + * struct radix_tree_iter - radix tree iterator state + * + * @index: index of current slot + * @next_index: one beyond the last index for this chunk + * @tags: bit-mask for tag-iterating + * @node: node that contains current slot + * @shift: shift for the node that holds our slots + * + * This radix tree iterator works in terms of "chunks" of slots. A chunk is a + * subinterval of slots contained within one radix tree leaf node. It is + * described by a pointer to its first slot and a struct radix_tree_iter + * which holds the chunk's position in the tree and its size. For tagged + * iteration radix_tree_iter also holds the slots' bit-mask for one chosen + * radix tree tag. 
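+ *
+ * Editorial example, not part of the patch: a chunk spanning a full
+ * leaf node with shift 0 that covers indices 64-127 starts with
+ * index == 64 and next_index == 128, so radix_tree_chunk_size()
+ * reports 64 remaining slots.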
+ */ +struct radix_tree_iter { + unsigned long index; + unsigned long next_index; + unsigned long tags; + struct radix_tree_node *node; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + unsigned int shift; +#endif +}; + +static inline unsigned int iter_shift(const struct radix_tree_iter *iter) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + return iter->shift; +#else + return 0; +#endif +} + /** * Radix-tree synchronization * @@ -283,6 +318,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag); +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, @@ -291,10 +328,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); -unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, - unsigned long *first_indexp, unsigned long last_index, - unsigned long nr_to_tag, - unsigned int fromtag, unsigned int totag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) @@ -302,39 +335,6 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } -/** - * struct radix_tree_iter - radix tree iterator state - * - * @index: index of current slot - * @next_index: one beyond the last index for this chunk - * @tags: bit-mask for tag-iterating - * @shift: shift for the node that holds our slots - * - * This radix tree iterator works in terms of "chunks" of slots. A chunk is a - * subinterval of slots contained within one radix tree leaf node. It is - * described by a pointer to its first slot and a struct radix_tree_iter - * which holds the chunk's position in the tree and its size. For tagged - * iteration radix_tree_iter also holds the slots' bit-mask for one chosen - * radix tree tag. 
- */ -struct radix_tree_iter { - unsigned long index; - unsigned long next_index; - unsigned long tags; -#ifdef CONFIG_RADIX_TREE_MULTIORDER - unsigned int shift; -#endif -}; - -static inline unsigned int iter_shift(struct radix_tree_iter *iter) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - return iter->shift; -#else - return 0; -#endif -} - #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 5bdf1bdbca00..d1f4ca5b7989 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -219,6 +219,11 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, return RADIX_TREE_MAP_SIZE; } +static unsigned int iter_offset(const struct radix_tree_iter *iter) +{ + return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK; +} + /* * The maximum index which can be stored in a radix tree */ @@ -1014,6 +1019,18 @@ static void node_tag_set(struct radix_tree_root *root, root_tag_set(root, tag); } +/** + * radix_tree_iter_tag_set - set a tag on the current iterator entry + * @root: radix tree root + * @iter: iterator state + * @tag: tag to set + */ +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag) +{ + node_tag_set(root, iter->node, tag, iter_offset(iter)); +} + /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root @@ -1164,6 +1181,7 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, if (node == RADIX_TREE_RETRY) return slot; node = entry_to_node(node); + iter->node = node; iter->shift = node->shift; if (flags & RADIX_TREE_ITER_TAGGED) { @@ -1266,6 +1284,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, iter->index = index; iter->next_index = maxindex + 1; iter->tags = 1; + iter->node = NULL; __set_iter_shift(iter, 0); return (void **)&root->rnode; } @@ -1308,6 +1327,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, /* Update the iterator state */ iter->index = (index &~ node_maxindex(node)) | (offset << node->shift); iter->next_index = (index | node_maxindex(node)) + 1; + iter->node = node; __set_iter_shift(iter, node->shift); if (flags & RADIX_TREE_ITER_TAGGED) @@ -1317,103 +1337,6 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_next_chunk); -/** - * radix_tree_range_tag_if_tagged - for each item in given range set given - * tag if item has another tag set - * @root: radix tree root - * @first_indexp: pointer to a starting index of a range to scan - * @last_index: last index of a range to scan - * @nr_to_tag: maximum number items to tag - * @iftag: tag index to test - * @settag: tag index to set if tested tag is set - * - * This function scans range of radix tree from first_index to last_index - * (inclusive). For each item in the range if iftag is set, the function sets - * also settag. The function stops either after tagging nr_to_tag items or - * after reaching last_index. - * - * The tags must be set from the leaf level only and propagated back up the - * path to the root. We must do this so that we resolve the full path before - * setting any tags on intermediate nodes. If we set tags as we descend, then - * we can get to the leaf node and find that the index that has the iftag - * set is outside the range we are scanning. 
This reults in dangling tags and - * can lead to problems with later tag operations (e.g. livelocks on lookups). - * - * The function returns the number of leaves where the tag was set and sets - * *first_indexp to the first unscanned index. - * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must - * be prepared to handle that. - */ -unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, - unsigned long *first_indexp, unsigned long last_index, - unsigned long nr_to_tag, - unsigned int iftag, unsigned int settag) -{ - struct radix_tree_node *node, *child; - unsigned long maxindex; - unsigned long tagged = 0; - unsigned long index = *first_indexp; - - radix_tree_load_root(root, &child, &maxindex); - last_index = min(last_index, maxindex); - if (index > last_index) - return 0; - if (!nr_to_tag) - return 0; - if (!root_tag_get(root, iftag)) { - *first_indexp = last_index + 1; - return 0; - } - if (!radix_tree_is_internal_node(child)) { - *first_indexp = last_index + 1; - root_tag_set(root, settag); - return 1; - } - - node = entry_to_node(child); - - for (;;) { - unsigned offset = radix_tree_descend(node, &child, index); - if (!child) - goto next; - if (!tag_get(node, iftag, offset)) - goto next; - /* Sibling slots never have tags set on them */ - if (radix_tree_is_internal_node(child)) { - node = entry_to_node(child); - continue; - } - - tagged++; - node_tag_set(root, node, settag, offset); - next: - /* Go to next entry in node */ - index = ((index >> node->shift) + 1) << node->shift; - /* Overflow can happen when last_index is ~0UL... */ - if (index > last_index || !index) - break; - offset = (index >> node->shift) & RADIX_TREE_MAP_MASK; - while (offset == 0) { - /* - * We've fully scanned this node. Go up. Because - * last_index is guaranteed to be in the tree, what - * we do below cannot wander astray. 
- */ - node = node->parent; - offset = (index >> node->shift) & RADIX_TREE_MAP_MASK; - } - if (is_sibling_entry(node, node->slots[offset])) - goto next; - if (tagged >= nr_to_tag) - break; - } - - *first_indexp = index; - - return tagged; -} -EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); - /** * radix_tree_gang_lookup - perform multiple lookup on a radix tree * @root: radix tree root diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 52e2f8e3b472..290e8b7d3181 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2106,18 +2106,26 @@ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { #define WRITEBACK_TAG_BATCH 4096 - unsigned long tagged; - - do { - spin_lock_irq(&mapping->tree_lock); - tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, - &start, end, WRITEBACK_TAG_BATCH, - PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); + unsigned long tagged = 0; + struct radix_tree_iter iter; + void **slot; + + spin_lock_irq(&mapping->tree_lock); + radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start, + PAGECACHE_TAG_DIRTY) { + if (iter.index > end) + break; + radix_tree_iter_tag_set(&mapping->page_tree, &iter, + PAGECACHE_TAG_TOWRITE); + tagged++; + if ((tagged % WRITEBACK_TAG_BATCH) != 0) + continue; + slot = radix_tree_iter_resume(slot, &iter); spin_unlock_irq(&mapping->tree_lock); - WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); cond_resched(); - /* We check 'start' to handle wrapping when end == ~0UL */ - } while (tagged >= WRITEBACK_TAG_BATCH && start); + spin_lock_irq(&mapping->tree_lock); + } + spin_unlock_irq(&mapping->tree_lock); } EXPORT_SYMBOL(tag_pages_for_writeback); diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index a028dae8a043..170175ca4105 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -205,8 +205,7 @@ void copy_tag_check(void) } // printf("\ncopying tags...\n"); - cur = start; - tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, ITEMS, 0, 1); + tagged = tag_tagged_items(&tree, NULL, start, end, ITEMS, 0, 1); // printf("checking copied tags\n"); assert(tagged == count); @@ -214,16 +213,13 @@ void copy_tag_check(void) /* Copy tags in several rounds */ // printf("\ncopying tags...\n"); - cur = start; - do { - tmp = rand() % (count/10+2); - tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, tmp, 0, 2); - } while (tmp == tagged); + tmp = rand() % (count / 10 + 2); + tagged = tag_tagged_items(&tree, NULL, start, end, tmp, 0, 2); + assert(tagged == count); // printf("%lu %lu %lu\n", tagged, tmp, count); // printf("checking copied tags\n"); check_copied_tags(&tree, start, end, idx, ITEMS, 0, 2); - assert(tagged < tmp); verify_tag_consistency(&tree, 0); verify_tag_consistency(&tree, 1); verify_tag_consistency(&tree, 2); diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index b9be8856d652..86daf23b3509 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -26,7 +26,6 @@ static void __multiorder_tag_test(int index, int order) { RADIX_TREE(tree, GFP_KERNEL); int base, err, i; - unsigned long first = 0; /* our canonical entry */ base = index & ~((1 << order) - 1); @@ -60,7 +59,7 @@ static void __multiorder_tag_test(int index, int order) assert(!radix_tree_tag_get(&tree, i, 1)); } - assert(radix_tree_range_tag_if_tagged(&tree, &first, ~0UL, 10, 0, 1) == 1); + assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1); 
assert(radix_tree_tag_clear(&tree, index, 0)); for_each_index(i, base, order) { @@ -251,7 +250,6 @@ void multiorder_tagged_iteration(void) RADIX_TREE(tree, GFP_KERNEL); struct radix_tree_iter iter; void **slot; - unsigned long first = 0; int i, j; printf("Multiorder tagged iteration test\n"); @@ -296,8 +294,8 @@ void multiorder_tagged_iteration(void) } } - radix_tree_range_tag_if_tagged(&tree, &first, ~0UL, - MT_NUM_ENTRIES, 1, 2); + assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) == + TAG_ENTRIES); for (j = 0; j < 256; j++) { int mask, k; @@ -323,9 +321,8 @@ void multiorder_tagged_iteration(void) } } - first = 1; - radix_tree_range_tag_if_tagged(&tree, &first, ~0UL, - MT_NUM_ENTRIES, 1, 0); + assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0) + == TAG_ENTRIES); i = 0; radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) { assert(iter.index == tag_index[i]); diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c index 63bf347aaf33..a41325d7a170 100644 --- a/tools/testing/radix-tree/regression2.c +++ b/tools/testing/radix-tree/regression2.c @@ -50,6 +50,7 @@ #include #include "regression.h" +#include "test.h" #define PAGECACHE_TAG_DIRTY 0 #define PAGECACHE_TAG_WRITEBACK 1 @@ -90,7 +91,7 @@ void regression2_test(void) /* 1. */ start = 0; end = max_slots - 2; - radix_tree_range_tag_if_tagged(&mt_tree, &start, end, 1, + tag_tagged_items(&mt_tree, NULL, start, end, 1, PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); /* 2. */ diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index 186f6e4914e4..ed5f87d1dce2 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c @@ -23,7 +23,7 @@ __simple_checks(struct radix_tree_root *tree, unsigned long index, int tag) item_tag_set(tree, index, tag); ret = item_tag_get(tree, index, tag); assert(ret != 0); - ret = radix_tree_range_tag_if_tagged(tree, &first, ~0UL, 10, tag, !tag); + ret = tag_tagged_items(tree, NULL, first, ~0UL, 10, tag, !tag); assert(ret == 1); ret = item_tag_get(tree, index, !tag); assert(ret != 0); @@ -320,7 +320,7 @@ static void single_check(void) assert(ret == 0); verify_tag_consistency(&tree, 0); verify_tag_consistency(&tree, 1); - ret = radix_tree_range_tag_if_tagged(&tree, &first, 10, 10, 0, 1); + ret = tag_tagged_items(&tree, NULL, first, 10, 10, 0, 1); assert(ret == 1); ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 1); assert(ret == 1); diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 88bf57f7175e..e5726e373646 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -151,6 +151,40 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start, assert(nfound == 0); } +/* Use the same pattern as tag_pages_for_writeback() in mm/page-writeback.c */ +int tag_tagged_items(struct radix_tree_root *root, pthread_mutex_t *lock, + unsigned long start, unsigned long end, unsigned batch, + unsigned iftag, unsigned thentag) +{ + unsigned long tagged = 0; + struct radix_tree_iter iter; + void **slot; + + if (batch == 0) + batch = 1; + + if (lock) + pthread_mutex_lock(lock); + radix_tree_for_each_tagged(slot, root, &iter, start, iftag) { + if (iter.index > end) + break; + radix_tree_iter_tag_set(root, &iter, thentag); + tagged++; + if ((tagged % batch) != 0) + continue; + slot = radix_tree_iter_resume(slot, &iter); + if (lock) { + pthread_mutex_unlock(lock); + rcu_barrier(); + pthread_mutex_lock(lock); + } + } 
+ if (lock) + pthread_mutex_unlock(lock); + + return tagged; +} + /* Use the same pattern as find_swap_entry() in mm/shmem.c */ unsigned long find_item(struct radix_tree_root *root, void *item) { diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 3d9d1d30da22..e11e4d260b4e 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -25,6 +25,9 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start, unsigned long nr, int chunk); void item_kill_tree(struct radix_tree_root *root); +int tag_tagged_items(struct radix_tree_root *, pthread_mutex_t *, + unsigned long start, unsigned long end, unsigned batch, + unsigned iftag, unsigned thentag); unsigned long find_item(struct radix_tree_root *, void *item); void tag_check(void); -- cgit v1.2.3 From 175542f575723e43f897ddb09d0011c13f7cf0ec Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:08:58 -0800 Subject: radix-tree: add radix_tree_join This new function allows for the replacement of many smaller entries in the radix tree with one larger multiorder entry. From the point of view of an RCU walker, they may see a mixture of the smaller entries and the large entry during the same walk, but they will never see NULL for an index which was populated before the join. Link: http://lkml.kernel.org/r/1480369871-5271-58-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 3 + lib/radix-tree.c | 183 ++++++++++++++++++++++++++++------ tools/testing/radix-tree/multiorder.c | 58 +++++++++++ 3 files changed, 213 insertions(+), 31 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 7a8d2516c73a..935293a24f7d 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -335,6 +335,9 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } +int radix_tree_join(struct radix_tree_root *, unsigned long index, + unsigned new_order, void *); + #define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ #define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ diff --git a/lib/radix-tree.c b/lib/radix-tree.c index d1f4ca5b7989..962cfb3a7671 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -339,17 +339,14 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); - int i; /* - * must only free zeroed nodes into the slab. radix_tree_shrink - * can leave us with a non-NULL entry in the first slot, so clear - * that here to make sure. + * Must only free zeroed nodes into the slab. We can be left with + * non-NULL entries by radix_tree_free_nodes, so clear the entries + * and tags here. */ - for (i = 0; i < RADIX_TREE_MAX_TAGS; i++) - tag_clear(node, i, 0); - - node->slots[0] = NULL; + memset(node->slots, 0, sizeof(node->slots)); + memset(node->tags, 0, sizeof(node->tags)); INIT_LIST_HEAD(&node->private_list); kmem_cache_free(radix_tree_node_cachep, node); @@ -678,14 +675,14 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, shift = radix_tree_load_root(root, &child, &maxindex); /* Make sure the tree is high enough. 
*/ + if (order > 0 && max == ((1UL << order) - 1)) + max++; if (max > maxindex) { int error = radix_tree_extend(root, max, shift); if (error < 0) return error; shift = error; child = root->rnode; - if (order == shift) - shift += RADIX_TREE_MAP_SHIFT; } while (shift > order) { @@ -697,6 +694,8 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, return -ENOMEM; child->shift = shift; child->offset = offset; + child->count = 0; + child->exceptional = 0; child->parent = node; rcu_assign_pointer(*slot, node_to_entry(child)); if (node) @@ -710,31 +709,121 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, slot = &node->slots[offset]; } + if (nodep) + *nodep = node; + if (slotp) + *slotp = slot; + return 0; +} + #ifdef CONFIG_RADIX_TREE_MULTIORDER - /* Insert pointers to the canonical entry */ - if (order > shift) { - unsigned i, n = 1 << (order - shift); +/* + * Free any nodes below this node. The tree is presumed to not need + * shrinking, and any user data in the tree is presumed to not need a + * destructor called on it. If we need to add a destructor, we can + * add that functionality later. Note that we may not clear tags or + * slots from the tree as an RCU walker may still have a pointer into + * this subtree. We could replace the entries with RADIX_TREE_RETRY, + * but we'll still have to clear those in rcu_free. + */ +static void radix_tree_free_nodes(struct radix_tree_node *node) +{ + unsigned offset = 0; + struct radix_tree_node *child = entry_to_node(node); + + for (;;) { + void *entry = child->slots[offset]; + if (radix_tree_is_internal_node(entry) && + !is_sibling_entry(child, entry)) { + child = entry_to_node(entry); + offset = 0; + continue; + } + offset++; + while (offset == RADIX_TREE_MAP_SIZE) { + struct radix_tree_node *old = child; + offset = child->offset + 1; + child = child->parent; + radix_tree_node_free(old); + if (old == entry_to_node(node)) + return; + } + } +} + +static inline int insert_entries(struct radix_tree_node *node, void **slot, + void *item, unsigned order, bool replace) +{ + struct radix_tree_node *child; + unsigned i, n, tag, offset, tags = 0; + + if (node) { + n = 1 << (order - node->shift); + offset = get_slot_offset(node, slot); + } else { + n = 1; + offset = 0; + } + + if (n > 1) { offset = offset & ~(n - 1); slot = &node->slots[offset]; - child = node_to_entry(slot); - for (i = 0; i < n; i++) { - if (slot[i]) + } + child = node_to_entry(slot); + + for (i = 0; i < n; i++) { + if (slot[i]) { + if (replace) { + node->count--; + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tag_get(node, tag, offset + i)) + tags |= 1 << tag; + } else return -EEXIST; } + } - for (i = 1; i < n; i++) { + for (i = 0; i < n; i++) { + struct radix_tree_node *old = slot[i]; + if (i) { rcu_assign_pointer(slot[i], child); - node->count++; + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tags & (1 << tag)) + tag_clear(node, tag, offset + i); + } else { + rcu_assign_pointer(slot[i], item); + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tags & (1 << tag)) + tag_set(node, tag, offset); } + if (radix_tree_is_internal_node(old) && + !is_sibling_entry(node, old)) + radix_tree_free_nodes(old); + if (radix_tree_exceptional_entry(old)) + node->exceptional--; } -#endif - - if (nodep) - *nodep = node; - if (slotp) - *slotp = slot; - return 0; + if (node) { + node->count += n; + if (radix_tree_exceptional_entry(item)) + node->exceptional += n; + } + return n; } +#else +static inline int insert_entries(struct 
radix_tree_node *node, void **slot, + void *item, unsigned order, bool replace) +{ + if (*slot) + return -EEXIST; + rcu_assign_pointer(*slot, item); + if (node) { + node->count++; + if (radix_tree_exceptional_entry(item)) + node->exceptional++; + } + return 1; +} +#endif /** * __radix_tree_insert - insert into a radix tree @@ -757,15 +846,13 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, error = __radix_tree_create(root, index, order, &node, &slot); if (error) return error; - if (*slot != NULL) - return -EEXIST; - rcu_assign_pointer(*slot, item); + + error = insert_entries(node, slot, item, order, false); + if (error < 0) + return error; if (node) { unsigned offset = get_slot_offset(node, slot); - node->count++; - if (radix_tree_exceptional_entry(item)) - node->exceptional++; BUG_ON(tag_get(node, 0, offset)); BUG_ON(tag_get(node, 1, offset)); BUG_ON(tag_get(node, 2, offset)); @@ -942,6 +1029,40 @@ void radix_tree_replace_slot(struct radix_tree_root *root, replace_slot(root, NULL, slot, item, true); } +#ifdef CONFIG_RADIX_TREE_MULTIORDER +/** + * radix_tree_join - replace multiple entries with one multiorder entry + * @root: radix tree root + * @index: an index inside the new entry + * @order: order of the new entry + * @item: new entry + * + * Call this function to replace several entries with one larger entry. + * The existing entries are presumed to not need freeing as a result of + * this call. + * + * The replacement entry will have all the tags set on it that were set + * on any of the entries it is replacing. + */ +int radix_tree_join(struct radix_tree_root *root, unsigned long index, + unsigned order, void *item) +{ + struct radix_tree_node *node; + void **slot; + int error; + + BUG_ON(radix_tree_is_internal_node(item)); + + error = __radix_tree_create(root, index, order, &node, &slot); + if (!error) + error = insert_entries(node, slot, item, order, true); + if (error > 0) + error = 0; + + return error; +} +#endif + /** * radix_tree_tag_set - set a tag on a radix tree node * @root: radix tree root diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 86daf23b3509..c9f656cf5f52 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -332,6 +332,63 @@ void multiorder_tagged_iteration(void) item_kill_tree(&tree); } +static void __multiorder_join(unsigned long index, + unsigned order1, unsigned order2) +{ + unsigned long loc; + void *item, *item2 = item_create(index + 1, order1); + RADIX_TREE(tree, GFP_KERNEL); + + item_insert_order(&tree, index, order2); + item = radix_tree_lookup(&tree, index); + radix_tree_join(&tree, index + 1, order1, item2); + loc = find_item(&tree, item); + if (loc == -1) + free(item); + item = radix_tree_lookup(&tree, index + 1); + assert(item == item2); + item_kill_tree(&tree); +} + +static void __multiorder_join2(unsigned order1, unsigned order2) +{ + RADIX_TREE(tree, GFP_KERNEL); + struct radix_tree_node *node; + void *item1 = item_create(0, order1); + void *item2; + + item_insert_order(&tree, 0, order2); + radix_tree_insert(&tree, 1 << order2, (void *)0x12UL); + item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL); + assert(item2 == (void *)0x12UL); + assert(node->exceptional == 1); + + radix_tree_join(&tree, 0, order1, item1); + item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL); + assert(item2 == item1); + assert(node->exceptional == 0); + item_kill_tree(&tree); +} + +static void multiorder_join(void) +{ + int i, j, idx; + + 
for (idx = 0; idx < 1024; idx = idx * 2 + 3) { + for (i = 1; i < 15; i++) { + for (j = 0; j < i; j++) { + __multiorder_join(idx, i, j); + } + } + } + + for (i = 1; i < 15; i++) { + for (j = 0; j < i; j++) { + __multiorder_join2(i, j); + } + } +} + void multiorder_checks(void) { int i; @@ -349,4 +406,5 @@ void multiorder_checks(void) multiorder_tag_tests(); multiorder_iteration(); multiorder_tagged_iteration(); + multiorder_join(); } -- cgit v1.2.3 From e157b555945fb16ddc6cce605a1eb6b4135ea5f1 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:01 -0800 Subject: radix-tree: add radix_tree_split This new function splits a larger multiorder entry into smaller entries (potentially multi-order entries). These entries are initialised to RADIX_TREE_RETRY to ensure that RCU walkers who see this state aren't confused. The caller should then call radix_tree_for_each_slot() and radix_tree_replace_slot() in order to turn these retry entries into the intended new entries. Tags are replicated from the original multiorder entry into each new entry. Link: http://lkml.kernel.org/r/1480369871-5271-59-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 12 +++ lib/radix-tree.c | 142 +++++++++++++++++++++++++++++++++- tools/testing/radix-tree/multiorder.c | 64 +++++++++++++++ 3 files changed, 214 insertions(+), 4 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 935293a24f7d..1f4b56120de8 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -80,6 +80,14 @@ static inline bool radix_tree_is_internal_node(void *ptr) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is an exceptional entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @exceptional is the count of every element in ->slots which is + * either radix_tree_exceptional_entry() or is a sibling entry for an + * exceptional entry. + */ struct radix_tree_node { unsigned char shift; /* Bits remaining in each slot */ unsigned char offset; /* Slot offset in parent */ @@ -293,6 +301,8 @@ void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, void **slot, void *item, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_replace(struct radix_tree_root *, + const struct radix_tree_iter *, void **slot, void *item); void radix_tree_replace_slot(struct radix_tree_root *root, void **slot, void *item); void __radix_tree_delete_node(struct radix_tree_root *root, @@ -335,6 +345,8 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } +int radix_tree_split(struct radix_tree_root *, unsigned long index, + unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, void *); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 962cfb3a7671..ade2ed3f5190 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -22,6 +22,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include #include #include #include @@ -758,7 +759,10 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot, unsigned i, n, tag, offset, tags = 0; if (node) { - n = 1 << (order - node->shift); + if (order > node->shift) + n = 1 << (order - node->shift); + else + n = 1; offset = get_slot_offset(node, slot); } else { n = 1; @@ -797,7 +801,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot, tag_set(node, tag, offset); } if (radix_tree_is_internal_node(old) && - !is_sibling_entry(node, old)) + !is_sibling_entry(node, old) && + (old != RADIX_TREE_RETRY)) radix_tree_free_nodes(old); if (radix_tree_exceptional_entry(old)) node->exceptional--; @@ -1021,7 +1026,8 @@ void __radix_tree_replace(struct radix_tree_root *root, * NOTE: This cannot be used to switch between non-entries (empty slots), * regular entries, and exceptional entries, as that requires accounting * inside the radix tree node. When switching from one type of entry or - * deleting, use __radix_tree_lookup() and __radix_tree_replace(). + * deleting, use __radix_tree_lookup() and __radix_tree_replace() or + * radix_tree_iter_replace(). */ void radix_tree_replace_slot(struct radix_tree_root *root, void **slot, void *item) @@ -1029,6 +1035,21 @@ void radix_tree_replace_slot(struct radix_tree_root *root, replace_slot(root, NULL, slot, item, true); } +/** + * radix_tree_iter_replace - replace item in a slot + * @root: radix tree root + * @slot: pointer to slot + * @item: new item to store in the slot. + * + * For use with radix_tree_split() and radix_tree_for_each_slot(). + * Caller must hold tree write locked across split and replacement. + */ +void radix_tree_iter_replace(struct radix_tree_root *root, + const struct radix_tree_iter *iter, void **slot, void *item) +{ + __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); +} + #ifdef CONFIG_RADIX_TREE_MULTIORDER /** * radix_tree_join - replace multiple entries with one multiorder entry @@ -1061,6 +1082,117 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index, return error; } + +/** + * radix_tree_split - Split an entry into smaller entries + * @root: radix tree root + * @index: An index within the large entry + * @order: Order of new entries + * + * Call this function as the first step in replacing a multiorder entry + * with several entries of lower order. After this function returns, + * loop over the relevant portion of the tree using radix_tree_for_each_slot() + * and call radix_tree_iter_replace() to set up each new entry. + * + * The tags from this entry are replicated to all the new entries. + * + * The radix tree should be locked against modification during the entire + * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which + * should prompt RCU walkers to restart the lookup from the root. 
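+ *
+ * A sketch of the intended calling sequence (an illustrative addition,
+ * not part of the original kernel-doc; it assumes the tree lock is held
+ * and enough nodes have been preloaded):
+ *
+ *	radix_tree_split(root, index, new_order);
+ *	radix_tree_for_each_slot(slot, root, &iter, first) {
+ *		radix_tree_iter_replace(root, &iter, slot, new_item);
+ *	}
+ *
+ * where @first is the first index covered by the old entry.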
+ */ +int radix_tree_split(struct radix_tree_root *root, unsigned long index, + unsigned order) +{ + struct radix_tree_node *parent, *node, *child; + void **slot; + unsigned int offset, end; + unsigned n, tag, tags = 0; + + if (!__radix_tree_lookup(root, index, &parent, &slot)) + return -ENOENT; + if (!parent) + return -ENOENT; + + offset = get_slot_offset(parent, slot); + + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tag_get(parent, tag, offset)) + tags |= 1 << tag; + + for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { + if (!is_sibling_entry(parent, parent->slots[end])) + break; + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tags & (1 << tag)) + tag_set(parent, tag, end); + /* rcu_assign_pointer ensures tags are set before RETRY */ + rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY); + } + rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY); + parent->exceptional -= (end - offset); + + if (order == parent->shift) + return 0; + if (order > parent->shift) { + while (offset < end) + offset += insert_entries(parent, &parent->slots[offset], + RADIX_TREE_RETRY, order, true); + return 0; + } + + node = parent; + + for (;;) { + if (node->shift > order) { + child = radix_tree_node_alloc(root); + if (!child) + goto nomem; + child->shift = node->shift - RADIX_TREE_MAP_SHIFT; + child->offset = offset; + child->count = 0; + child->parent = node; + if (node != parent) { + node->count++; + node->slots[offset] = node_to_entry(child); + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tags & (1 << tag)) + tag_set(node, tag, offset); + } + + node = child; + offset = 0; + continue; + } + + n = insert_entries(node, &node->slots[offset], + RADIX_TREE_RETRY, order, false); + BUG_ON(n > RADIX_TREE_MAP_SIZE); + + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + if (tags & (1 << tag)) + tag_set(node, tag, offset); + offset += n; + + while (offset == RADIX_TREE_MAP_SIZE) { + if (node == parent) + break; + offset = node->offset; + child = node; + node = node->parent; + rcu_assign_pointer(node->slots[offset], + node_to_entry(child)); + offset++; + } + if ((node == parent) && (offset == end)) + return 0; + } + + nomem: + /* Shouldn't happen; did user forget to preload? 
*/ + /* TODO: free all the allocated nodes */ + WARN_ON(1); + return -ENOMEM; +} #endif /** @@ -1441,8 +1573,10 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, child = rcu_dereference_raw(node->slots[offset]); } - if ((child == NULL) || (child == RADIX_TREE_RETRY)) + if (!child) goto restart; + if (child == RADIX_TREE_RETRY) + break; } while (radix_tree_is_internal_node(child)); /* Update the iterator state */ diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index c9f656cf5f52..fa6effe997a3 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -389,6 +389,69 @@ static void multiorder_join(void) } } +static void __multiorder_split(int old_order, int new_order) +{ + RADIX_TREE(tree, GFP_KERNEL); + void **slot; + struct radix_tree_iter iter; + struct radix_tree_node *node; + void *item; + + item_insert_order(&tree, 0, old_order); + radix_tree_tag_set(&tree, 0, 2); + radix_tree_split(&tree, 0, new_order); + radix_tree_for_each_slot(slot, &tree, &iter, 0) { + radix_tree_iter_replace(&tree, &iter, slot, + item_create(iter.index, new_order)); + } + + item_kill_tree(&tree); + + __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + + item = __radix_tree_lookup(&tree, 0, &node, NULL); + assert(item == (void *)0x12); + assert(node->exceptional > 0); + + radix_tree_split(&tree, 0, new_order); + radix_tree_for_each_slot(slot, &tree, &iter, 0) { + radix_tree_iter_replace(&tree, &iter, slot, + item_create(iter.index, new_order)); + } + + item = __radix_tree_lookup(&tree, 0, &node, NULL); + assert(item != (void *)0x12); + assert(node->exceptional == 0); + + item_kill_tree(&tree); + + __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + + item = __radix_tree_lookup(&tree, 0, &node, NULL); + assert(item == (void *)0x12); + assert(node->exceptional > 0); + + radix_tree_split(&tree, 0, new_order); + radix_tree_for_each_slot(slot, &tree, &iter, 0) { + radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16); + } + + item = __radix_tree_lookup(&tree, 0, &node, NULL); + assert(item == (void *)0x16); + assert(node->exceptional > 0); + + item_kill_tree(&tree); +} + +static void multiorder_split(void) +{ + int i, j; + + for (i = 9; i < 19; i++) + for (j = 0; j < i; j++) + __multiorder_split(i, j); +} + void multiorder_checks(void) { int i; @@ -407,4 +470,5 @@ void multiorder_checks(void) multiorder_iteration(); multiorder_tagged_iteration(); multiorder_join(); + multiorder_split(); } -- cgit v1.2.3 From 2791653a6814d170fa893344618563a7b1da95c6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:04 -0800 Subject: radix-tree: add radix_tree_split_preload() Calculate how many nodes we need to allocate to split an old_order entry into multiple entries, each of size new_order. The test suite checks that we allocated exactly the right number of nodes; neither too many (checked by rtp->nr == 0), nor too few (checked by comparing nr_allocated before and after the call to radix_tree_split()). Link: http://lkml.kernel.org/r/1480369871-5271-60-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 1 + lib/radix-tree.c | 24 +++++++++++++++++++- tools/testing/radix-tree/multiorder.c | 42 +++++++++++++++++++++++++++++++++-- tools/testing/radix-tree/test.h | 5 +++++ 4 files changed, 69 insertions(+), 3 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 1f4b56120de8..5dea8f6440e4 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -345,6 +345,7 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } +int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, diff --git a/lib/radix-tree.c b/lib/radix-tree.c index ade2ed3f5190..be1183e62590 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -368,7 +368,7 @@ radix_tree_node_free(struct radix_tree_node *node) * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ -static int __radix_tree_preload(gfp_t gfp_mask, int nr) +static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) { struct radix_tree_preload *rtp; struct radix_tree_node *node; @@ -434,6 +434,28 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) } EXPORT_SYMBOL(radix_tree_maybe_preload); +#ifdef CONFIG_RADIX_TREE_MULTIORDER +/* + * Preload with enough objects to ensure that we can split a single entry + * of order @old_order into many entries of size @new_order + */ +int radix_tree_split_preload(unsigned int old_order, unsigned int new_order, + gfp_t gfp_mask) +{ + unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT); + unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) - + (new_order / RADIX_TREE_MAP_SHIFT); + unsigned nr = 0; + + WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); + BUG_ON(new_order >= old_order); + + while (layers--) + nr = nr * RADIX_TREE_MAP_SIZE + 1; + return __radix_tree_preload(gfp_mask, top * nr); +} +#endif + /* * The same as function above, but preload number of nodes required to insert * (1 << order) continuous naturally-aligned elements. diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index fa6effe997a3..5421f015f46c 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -389,35 +389,67 @@ static void multiorder_join(void) } } +static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) +{ + struct radix_tree_preload *rtp = &radix_tree_preloads; + if (rtp->nr != 0) + printf("split(%u %u) remaining %u\n", old_order, new_order, + rtp->nr); + /* + * Can't check for equality here as some nodes may have been + * RCU-freed while we ran. But we should never finish with more + * nodes allocated since they should have all been preloaded. 
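+	 *
+	 * (Worked example of the preload arithmetic, added for illustration:
+	 * with RADIX_TREE_MAP_SHIFT == 6, splitting an order-7 entry into
+	 * order-0 entries gives top = 1 << (7 % 6) = 2 occupied slots and
+	 * layers = 1, so nr = 1 and radix_tree_split_preload() reserves
+	 * top * nr = 2 nodes, one shift-0 child per occupied slot.)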
+ */ + if (nr_allocated > alloc) + printf("split(%u %u) allocated %u %u\n", old_order, new_order, + alloc, nr_allocated); +} + static void __multiorder_split(int old_order, int new_order) { - RADIX_TREE(tree, GFP_KERNEL); + RADIX_TREE(tree, GFP_ATOMIC); void **slot; struct radix_tree_iter iter; struct radix_tree_node *node; void *item; + unsigned alloc; + + radix_tree_preload(GFP_KERNEL); + assert(item_insert_order(&tree, 0, old_order) == 0); + radix_tree_preload_end(); + + /* Wipe out the preloaded cache or it'll confuse check_mem() */ + radix_tree_cpu_dead(0); - item_insert_order(&tree, 0, old_order); radix_tree_tag_set(&tree, 0, 2); + + radix_tree_split_preload(old_order, new_order, GFP_KERNEL); + alloc = nr_allocated; radix_tree_split(&tree, 0, new_order); + check_mem(old_order, new_order, alloc); radix_tree_for_each_slot(slot, &tree, &iter, 0) { radix_tree_iter_replace(&tree, &iter, slot, item_create(iter.index, new_order)); } + radix_tree_preload_end(); item_kill_tree(&tree); + radix_tree_preload(GFP_KERNEL); __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x12); assert(node->exceptional > 0); + radix_tree_split_preload(old_order, new_order, GFP_KERNEL); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { radix_tree_iter_replace(&tree, &iter, slot, item_create(iter.index, new_order)); } + radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item != (void *)0x12); @@ -425,16 +457,20 @@ static void __multiorder_split(int old_order, int new_order) item_kill_tree(&tree); + radix_tree_preload(GFP_KERNEL); __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x12); assert(node->exceptional > 0); + radix_tree_split_preload(old_order, new_order, GFP_KERNEL); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16); } + radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x16); @@ -471,4 +507,6 @@ void multiorder_checks(void) multiorder_tagged_iteration(); multiorder_join(); multiorder_split(); + + radix_tree_cpu_dead(0); } diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index e11e4d260b4e..7c2611caa6d2 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -52,3 +52,8 @@ int root_tag_get(struct radix_tree_root *root, unsigned int tag); unsigned long node_maxindex(struct radix_tree_node *); unsigned long shift_maxindex(unsigned int shift); int radix_tree_cpu_dead(unsigned int cpu); +struct radix_tree_preload { + unsigned nr; + struct radix_tree_node *nodes; +}; +extern struct radix_tree_preload radix_tree_preloads; -- cgit v1.2.3 From a90eb3a2a405cf7e96093ed531a285067dfdbc9d Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:07 -0800 Subject: radix-tree: fix replacement for multiorder entries When replacing an entry with NULL, we need to delete any sibling entries. Also account deleting exceptional entries properly. Also fix a bug with radix_tree_iter_replace() where we would fail to remove entirely freed nodes. Also fix accounting bug when switching between normal and exceptional entries with replace_slot. Also add testcases for all these bugs. 
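For reference, the accounting invariant these fixes protect is the one
exercised by the multiorder_account() testcase added below. A condensed
sketch (an illustrative restatement, not extra functionality;
item_insert_order() comes from the test suite):

	RADIX_TREE(tree, GFP_KERNEL);
	struct radix_tree_node *node;

	item_insert_order(&tree, 0, 5);		/* normal order-5 entry */
	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); /* exceptional */
	__radix_tree_lookup(&tree, 0, &node, NULL);
	/* each order-5 entry fills one canonical slot plus its siblings */
	assert(node->count == node->exceptional * 2);
	radix_tree_delete(&tree, 1 << 5);	/* must clear sibling slots too */
	assert(node->exceptional == 0);		/* failed before this patch */
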
Link: http://lkml.kernel.org/r/1480369871-5271-61-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/radix-tree.c | 60 +++++++++++++++++------- tools/testing/radix-tree/multiorder.c | 87 ++++++++++++++++++++++++++++++----- 2 files changed, 119 insertions(+), 28 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index be1183e62590..d09c17dd60ae 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -977,6 +977,24 @@ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) } EXPORT_SYMBOL(radix_tree_lookup); +static inline int slot_count(struct radix_tree_node *node, + void **slot) +{ + int n = 1; +#ifdef CONFIG_RADIX_TREE_MULTIORDER + void *ptr = node_to_entry(slot); + unsigned offset = get_slot_offset(node, slot); + int i; + + for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { + if (node->slots[offset + i] != ptr) + break; + n++; + } +#endif + return n; +} + static void replace_slot(struct radix_tree_root *root, struct radix_tree_node *node, void **slot, void *item, @@ -995,12 +1013,35 @@ static void replace_slot(struct radix_tree_root *root, if (node) { node->count += count; - node->exceptional += exceptional; + if (exceptional) { + exceptional *= slot_count(node, slot); + node->exceptional += exceptional; + } } rcu_assign_pointer(*slot, item); } +static inline void delete_sibling_entries(struct radix_tree_node *node, + void **slot) +{ +#ifdef CONFIG_RADIX_TREE_MULTIORDER + bool exceptional = radix_tree_exceptional_entry(*slot); + void *ptr = node_to_entry(slot); + unsigned offset = get_slot_offset(node, slot); + int i; + + for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { + if (node->slots[offset + i] != ptr) + break; + node->slots[offset + i] = NULL; + node->count--; + if (exceptional) + node->exceptional--; + } +#endif +} + /** * __radix_tree_replace - replace item in a slot * @root: radix tree root @@ -1018,6 +1059,8 @@ void __radix_tree_replace(struct radix_tree_root *root, void **slot, void *item, radix_tree_update_node_t update_node, void *private) { + if (!item) + delete_sibling_entries(node, slot); /* * This function supports replacing exceptional entries and * deleting entries, but that needs accounting against the @@ -1794,20 +1837,6 @@ void __radix_tree_delete_node(struct radix_tree_root *root, delete_node(root, node, NULL, NULL); } -static inline void delete_sibling_entries(struct radix_tree_node *node, - void *ptr, unsigned offset) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - int i; - for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { - if (node->slots[offset + i] != ptr) - break; - node->slots[offset + i] = NULL; - node->count--; - } -#endif -} - /** * radix_tree_delete_item - delete an item from a radix tree * @root: radix tree root @@ -1847,7 +1876,6 @@ void *radix_tree_delete_item(struct radix_tree_root *root, for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); - delete_sibling_entries(node, node_to_entry(slot), offset); __radix_tree_replace(root, node, slot, NULL, NULL, NULL); return entry; diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 5421f015f46c..9757b8928bd4 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -410,8 +410,6 @@ static void __multiorder_split(int old_order, int new_order) RADIX_TREE(tree, GFP_ATOMIC); void 
**slot; struct radix_tree_iter iter; - struct radix_tree_node *node; - void *item; unsigned alloc; radix_tree_preload(GFP_KERNEL); @@ -434,58 +432,122 @@ static void __multiorder_split(int old_order, int new_order) radix_tree_preload_end(); item_kill_tree(&tree); +} + +static void __multiorder_split2(int old_order, int new_order) +{ + RADIX_TREE(tree, GFP_KERNEL); + void **slot; + struct radix_tree_iter iter; + struct radix_tree_node *node; + void *item; - radix_tree_preload(GFP_KERNEL); __radix_tree_insert(&tree, 0, old_order, (void *)0x12); - radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x12); assert(node->exceptional > 0); - radix_tree_split_preload(old_order, new_order, GFP_KERNEL); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { radix_tree_iter_replace(&tree, &iter, slot, item_create(iter.index, new_order)); } - radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item != (void *)0x12); assert(node->exceptional == 0); item_kill_tree(&tree); +} + +static void __multiorder_split3(int old_order, int new_order) +{ + RADIX_TREE(tree, GFP_KERNEL); + void **slot; + struct radix_tree_iter iter; + struct radix_tree_node *node; + void *item; - radix_tree_preload(GFP_KERNEL); __radix_tree_insert(&tree, 0, old_order, (void *)0x12); - radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x12); assert(node->exceptional > 0); - radix_tree_split_preload(old_order, new_order, GFP_KERNEL); radix_tree_split(&tree, 0, new_order); radix_tree_for_each_slot(slot, &tree, &iter, 0) { radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16); } - radix_tree_preload_end(); item = __radix_tree_lookup(&tree, 0, &node, NULL); assert(item == (void *)0x16); assert(node->exceptional > 0); item_kill_tree(&tree); + + __radix_tree_insert(&tree, 0, old_order, (void *)0x12); + + item = __radix_tree_lookup(&tree, 0, &node, NULL); + assert(item == (void *)0x12); + assert(node->exceptional > 0); + + radix_tree_split(&tree, 0, new_order); + radix_tree_for_each_slot(slot, &tree, &iter, 0) { + if (iter.index == (1 << new_order)) + radix_tree_iter_replace(&tree, &iter, slot, + (void *)0x16); + else + radix_tree_iter_replace(&tree, &iter, slot, NULL); + } + + item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL); + assert(item == (void *)0x16); + assert(node->count == node->exceptional); + do { + node = node->parent; + if (!node) + break; + assert(node->count == 1); + assert(node->exceptional == 0); + } while (1); + + item_kill_tree(&tree); } static void multiorder_split(void) { int i, j; - for (i = 9; i < 19; i++) - for (j = 0; j < i; j++) + for (i = 3; i < 11; i++) + for (j = 0; j < i; j++) { __multiorder_split(i, j); + __multiorder_split2(i, j); + __multiorder_split3(i, j); + } +} + +static void multiorder_account(void) +{ + RADIX_TREE(tree, GFP_KERNEL); + struct radix_tree_node *node; + void **slot; + + item_insert_order(&tree, 0, 5); + + __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); + __radix_tree_lookup(&tree, 0, &node, NULL); + assert(node->count == node->exceptional * 2); + radix_tree_delete(&tree, 1 << 5); + assert(node->exceptional == 0); + + __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); + __radix_tree_lookup(&tree, 1 << 5, &node, &slot); + assert(node->count == node->exceptional * 2); + __radix_tree_replace(&tree, node, slot, NULL, NULL, NULL); + assert(node->exceptional == 0); + + item_kill_tree(&tree); } void 
multiorder_checks(void) @@ -507,6 +569,7 @@ void multiorder_checks(void) multiorder_tagged_iteration(); multiorder_join(); multiorder_split(); + multiorder_account(); radix_tree_cpu_dead(0); } -- cgit v1.2.3 From 3e3cdc68bede179a957fcd6be7b833a83df4e5de Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:10 -0800 Subject: radix tree test suite: check multiorder iteration The random iteration test only inserts order-0 entries currently. Update it to insert entries of order between 7 and 0. Also make the maximum index configurable, make some variables static, make the test duration variable, remove some useless spinning, and add a fifth thread which calls tag_tagged_items(). Link: http://lkml.kernel.org/r/1480369871-5271-62-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/radix-tree/iteration_check.c | 80 ++++++++++++++++++------------ tools/testing/radix-tree/main.c | 3 +- tools/testing/radix-tree/multiorder.c | 23 +++++++++ tools/testing/radix-tree/test.h | 2 +- 4 files changed, 73 insertions(+), 35 deletions(-) diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index f328a66899b4..7572b7ed930e 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -16,26 +16,36 @@ #include #include "test.h" -#define NUM_THREADS 4 -#define TAG 0 +#define NUM_THREADS 5 +#define MAX_IDX 100 +#define TAG 0 +#define NEW_TAG 1 + static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; static pthread_t threads[NUM_THREADS]; static unsigned int seeds[3]; -RADIX_TREE(tree, GFP_KERNEL); -bool test_complete; +static RADIX_TREE(tree, GFP_KERNEL); +static bool test_complete; +static int max_order; /* relentlessly fill the tree with tagged entries */ static void *add_entries_fn(void *arg) { - int pgoff; - rcu_register_thread(); while (!test_complete) { - for (pgoff = 0; pgoff < 100; pgoff++) { + unsigned long pgoff; + int order; + + for (pgoff = 0; pgoff < MAX_IDX; pgoff++) { pthread_mutex_lock(&tree_lock); - if (item_insert(&tree, pgoff) == 0) - item_tag_set(&tree, pgoff, TAG); + for (order = max_order; order >= 0; order--) { + if (item_insert_order(&tree, pgoff, order) + == 0) { + item_tag_set(&tree, pgoff, TAG); + break; + } + } pthread_mutex_unlock(&tree_lock); } } @@ -62,14 +72,7 @@ static void *tagged_iteration_fn(void *arg) while (!test_complete) { rcu_read_lock(); radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) { - void *entry; - int i; - - /* busy wait to let removals happen */ - for (i = 0; i < 1000000; i++) - ; - - entry = radix_tree_deref_slot(slot); + void *entry = radix_tree_deref_slot(slot); if (unlikely(!entry)) continue; @@ -110,14 +113,7 @@ static void *untagged_iteration_fn(void *arg) while (!test_complete) { rcu_read_lock(); radix_tree_for_each_slot(slot, &tree, &iter, 0) { - void *entry; - int i; - - /* busy wait to let removals happen */ - for (i = 0; i < 1000000; i++) - ; - - entry = radix_tree_deref_slot(slot); + void *entry = radix_tree_deref_slot(slot); if (unlikely(!entry)) continue; @@ -152,7 +148,7 @@ static void *remove_entries_fn(void *arg) while (!test_complete) { int pgoff; - pgoff = rand_r(&seeds[2]) % 100; + pgoff = rand_r(&seeds[2]) % MAX_IDX; pthread_mutex_lock(&tree_lock); item_delete(&tree, pgoff); @@ -164,36 +160,54 @@ static void *remove_entries_fn(void 
*arg) return NULL; } +static void *tag_entries_fn(void *arg) +{ + rcu_register_thread(); + + while (!test_complete) { + tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG, + NEW_TAG); + } + rcu_unregister_thread(); + return NULL; +} + /* This is a unit test for a bug found by the syzkaller tester */ -void iteration_test(void) +void iteration_test(unsigned order, unsigned test_duration) { int i; - printf("Running iteration tests for 10 seconds\n"); + printf("Running %siteration tests for %d seconds\n", + order > 0 ? "multiorder " : "", test_duration); + max_order = order; test_complete = false; for (i = 0; i < 3; i++) seeds[i] = rand(); if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) { - perror("pthread_create"); + perror("create tagged iteration thread"); exit(1); } if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) { - perror("pthread_create"); + perror("create untagged iteration thread"); exit(1); } if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) { - perror("pthread_create"); + perror("create add entry thread"); exit(1); } if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) { - perror("pthread_create"); + perror("create remove entry thread"); + exit(1); + } + if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) { + perror("create tag entry thread"); exit(1); } - sleep(10); + sleep(test_duration); test_complete = true; for (i = 0; i < NUM_THREADS; i++) { diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 170175ca4105..f7e9801a6754 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -350,7 +350,8 @@ int main(int argc, char **argv) regression1_test(); regression2_test(); regression3_test(); - iteration_test(); + iteration_test(0, 10); + iteration_test(7, 20); single_thread_tests(long_run); /* Free any remaining preallocated nodes */ diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 9757b8928bd4..08b4e16dc86f 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -75,8 +75,27 @@ static void __multiorder_tag_test(int index, int order) item_kill_tree(&tree); } +static void __multiorder_tag_test2(unsigned order, unsigned long index2) +{ + RADIX_TREE(tree, GFP_KERNEL); + unsigned long index = (1 << order); + index2 += index; + + assert(item_insert_order(&tree, 0, order) == 0); + assert(item_insert(&tree, index2) == 0); + + assert(radix_tree_tag_set(&tree, 0, 0)); + assert(radix_tree_tag_set(&tree, index2, 0)); + + assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2); + + item_kill_tree(&tree); +} + static void multiorder_tag_tests(void) { + int i, j; + /* test multi-order entry for indices 0-7 with no sibling pointers */ __multiorder_tag_test(0, 3); __multiorder_tag_test(5, 3); @@ -116,6 +135,10 @@ static void multiorder_tag_tests(void) __multiorder_tag_test(300, 8); __multiorder_tag_test(0x12345678UL, 8); + + for (i = 1; i < 10; i++) + for (j = 0; j < (10 << i); j++) + __multiorder_tag_test2(i, j); } static void multiorder_check(unsigned long index, int order) diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 7c2611caa6d2..056a23b56467 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -32,7 +32,7 @@ unsigned long find_item(struct radix_tree_root *, void *item); void tag_check(void); void multiorder_checks(void); -void iteration_test(void); +void iteration_test(unsigned order, unsigned 
duration); void benchmark(void); struct item * -- cgit v1.2.3 From 99c494077e2d4282a17120a772eecc00ec3004cc Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:13 -0800 Subject: idr: add ida_is_empty Two of the USB Gadgets were poking around in the internals of struct ida in order to determine if it is empty. Add the appropriate abstraction. Link: http://lkml.kernel.org/r/1480369871-5271-63-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Acked-by: Konstantin Khlebnikov Tested-by: Kirill A. Shutemov Cc: Ross Zwisler Cc: Felipe Balbi Cc: Greg Kroah-Hartman Cc: Michal Nazarewicz Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/usb/gadget/function/f_hid.c | 6 +++--- drivers/usb/gadget/function/f_printer.c | 6 +++--- include/linux/idr.h | 5 +++++ 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 7abd70b2a588..3151d2a0fe59 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -905,7 +905,7 @@ static void hidg_free_inst(struct usb_function_instance *f) mutex_lock(&hidg_ida_lock); hidg_put_minor(opts->minor); - if (idr_is_empty(&hidg_ida.idr)) + if (ida_is_empty(&hidg_ida)) ghid_cleanup(); mutex_unlock(&hidg_ida_lock); @@ -931,7 +931,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) mutex_lock(&hidg_ida_lock); - if (idr_is_empty(&hidg_ida.idr)) { + if (ida_is_empty(&hidg_ida)) { status = ghid_setup(NULL, HIDG_MINORS); if (status) { ret = ERR_PTR(status); @@ -944,7 +944,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) if (opts->minor < 0) { ret = ERR_PTR(opts->minor); kfree(opts); - if (idr_is_empty(&hidg_ida.idr)) + if (ida_is_empty(&hidg_ida)) ghid_cleanup(); goto unlock; } diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 0de36cda6e41..8054da9276dd 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1265,7 +1265,7 @@ static void gprinter_free_inst(struct usb_function_instance *f) mutex_lock(&printer_ida_lock); gprinter_put_minor(opts->minor); - if (idr_is_empty(&printer_ida.idr)) + if (ida_is_empty(&printer_ida)) gprinter_cleanup(); mutex_unlock(&printer_ida_lock); @@ -1289,7 +1289,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) mutex_lock(&printer_ida_lock); - if (idr_is_empty(&printer_ida.idr)) { + if (ida_is_empty(&printer_ida)) { status = gprinter_setup(PRINTER_MINORS); if (status) { ret = ERR_PTR(status); @@ -1302,7 +1302,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) if (opts->minor < 0) { ret = ERR_PTR(opts->minor); kfree(opts); - if (idr_is_empty(&printer_ida.idr)) + if (ida_is_empty(&printer_ida)) gprinter_cleanup(); goto unlock; } diff --git a/include/linux/idr.h b/include/linux/idr.h index 083d61e92706..3639a28188c9 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -195,6 +195,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } +static inline bool ida_is_empty(struct ida *ida) +{ + return idr_is_empty(&ida->idr); +} + void __init idr_init_cache(void); #endif /* __IDR_H__ */ -- cgit v1.2.3 From 37f4915fef0572e41ab91b7d3f7feb237cddbd92 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:16 -0800 Subject: tpm: use idr_find(), not idr_find_slowpath() idr_find_slowpath() is not intended to be part of the public API, 
it's an implementation detail. There's no reason to skip straight to the slowpath here. Link: http://lkml.kernel.org/r/1480369871-5271-64-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Peter Huewe Cc: Marcel Selhorst Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/tpm/tpm-chip.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index e5950131bd90..447b6ca09826 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(tpm_put_ops); * * The return'd chip has been tpm_try_get_ops'd and must be released via * tpm_put_ops - */ + */ struct tpm_chip *tpm_chip_find_get(int chip_num) { struct tpm_chip *chip, *res = NULL; @@ -103,7 +103,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num) } } while (chip_prev != chip_num); } else { - chip = idr_find_slowpath(&dev_nums_idr, chip_num); + chip = idr_find(&dev_nums_idr, chip_num); if (chip && !tpm_try_get_ops(chip)) res = chip; } -- cgit v1.2.3 From 444306129a920015a2cc876d13fcbf52382f39bd Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:19 -0800 Subject: rxrpc: abstract away knowledge of IDR internals Add idr_get_cursor() / idr_set_cursor() APIs, and remove the reference to IDR_SIZE. Link: http://lkml.kernel.org/r/1480369871-5271-65-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Reviewed-by: David Howells Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/idr.h | 26 ++++++++++++++++++++++++++ net/rxrpc/af_rxrpc.c | 11 ++++++----- net/rxrpc/conn_client.c | 4 ++-- 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/include/linux/idr.h b/include/linux/idr.h index 3639a28188c9..1eb755f77f2f 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -55,6 +55,32 @@ struct idr { } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +/** + * idr_get_cursor - Return the current position of the cyclic allocator + * @idr: idr handle + * + * The value returned is the value that will be next returned from + * idr_alloc_cyclic() if it is free (otherwise the search will start from + * this position). + */ +static inline unsigned int idr_get_cursor(struct idr *idr) +{ + return READ_ONCE(idr->cur); +} + +/** + * idr_set_cursor - Set the current position of the cyclic allocator + * @idr: idr handle + * @val: new position + * + * The next call to idr_alloc_cyclic() will return @val if it is free + * (otherwise the search will start from this position). 
+ */ +static inline void idr_set_cursor(struct idr *idr, unsigned int val) +{ + WRITE_ONCE(idr->cur, val); +} + /** * DOC: idr sync * idr synchronization (stolen from radix-tree.h) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 2d59c9be40e1..5f63f6dcaabb 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = { static int __init af_rxrpc_init(void) { int ret = -1; + unsigned int tmp; BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb)); get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch)); rxrpc_epoch |= RXRPC_RANDOM_EPOCH; - get_random_bytes(&rxrpc_client_conn_ids.cur, - sizeof(rxrpc_client_conn_ids.cur)); - rxrpc_client_conn_ids.cur &= 0x3fffffff; - if (rxrpc_client_conn_ids.cur == 0) - rxrpc_client_conn_ids.cur = 1; + get_random_bytes(&tmp, sizeof(tmp)); + tmp &= 0x3fffffff; + if (tmp == 0) + tmp = 1; + idr_set_cursor(&rxrpc_client_conn_ids, tmp); ret = -ENOMEM; rxrpc_call_jar = kmem_cache_create( diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 60ef9605167e..6cbcdcc29853 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) * times the maximum number of client conns away from the current * allocation point to try and keep the IDs concentrated. */ - id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur); + id_cursor = idr_get_cursor(&rxrpc_client_conn_ids); id = conn->proto.cid >> RXRPC_CIDSHIFT; distance = id - id_cursor; if (distance < 0) distance = -distance; - limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4; + limit = max(rxrpc_max_client_connections * 4, 1024U); if (distance > limit) goto mark_dont_reuse; -- cgit v1.2.3 From 424251a4a929a1b6dff2056d49135e3805132e32 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 14 Dec 2016 15:09:22 -0800 Subject: idr: reduce the number of bits per level from 8 to 6 In preparation for merging the IDR and radix tree, reduce the fanout at each level from 256 to 64. If this causes a performance problem then a bisect will point to this commit, and we'll have a better idea about what we might do to fix it. Link: http://lkml.kernel.org/r/1480369871-5271-66-git-send-email-mawilcox@linuxonhyperv.com Signed-off-by: Matthew Wilcox Tested-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Ross Zwisler Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/idr.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/include/linux/idr.h b/include/linux/idr.h index 1eb755f77f2f..3c01b89aed67 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -18,12 +18,11 @@ #include /* - * We want shallower trees and thus more bits covered at each layer. 8 - * bits gives us large enough first layer for most use cases and maximum - * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and - * 1k on 32bit. + * Using 6 bits at each layer allows us to allocate 7 layers out of each page. + * 8 bits only gave us 3 layers out of every pair of pages, which is less + * efficient except for trees with a largest element between 192-255 inclusive. 
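+ * (Worked example of the depth trade-off, added for illustration: ids up
+ * to 2^30 - 1 now need DIV_ROUND_UP(30, 6) = 5 levels, where the old
+ * 8-bit layers needed DIV_ROUND_UP(30, 8) = 4.)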
From de1af8f62a78ea8abcc2dddb6de622e4069a368e Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 14 Dec 2016 15:09:25 -0800
Subject: radix tree test suite: add some more functionality

IDR needs more functionality from the kernel: kmalloc()/kfree(), and
xchg().

Link: http://lkml.kernel.org/r/1480369871-5271-67-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox
Tested-by: Kirill A. Shutemov
Cc: Konstantin Khlebnikov
Cc: Ross Zwisler
Cc: Matthew Wilcox
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 tools/testing/radix-tree/linux.c        | 15 +++++++++++++++
 tools/testing/radix-tree/linux/kernel.h |  3 +++
 tools/testing/radix-tree/linux/slab.h   |  3 +++
 3 files changed, 21 insertions(+)

diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 1f32a16a3848..ff0452e8a0c4 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -54,6 +54,21 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	free(objp);
 }

+void *kmalloc(size_t size, gfp_t gfp)
+{
+	void *ret = malloc(size);
+	uatomic_inc(&nr_allocated);
+	return ret;
+}
+
+void kfree(void *p)
+{
+	if (!p)
+		return;
+	uatomic_dec(&nr_allocated);
+	free(p);
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, size_t size, size_t offset,
 	unsigned long flags, void (*ctor)(void *))

diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 23e77f5673ce..9b43b4975d83 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -8,6 +8,7 @@
 #include

 #include "../../include/linux/compiler.h"
+#include "../../include/linux/err.h"
 #include "../../../include/linux/kconfig.h"

 #ifdef BENCHMARK
@@ -58,4 +59,6 @@ static inline int in_interrupt(void)
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))

+#define xchg(ptr, x)	uatomic_xchg(ptr, x)
+
 #endif /* _KERNEL_H */

diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
index 452e2bf502e3..446639f78fc1 100644
--- a/tools/testing/radix-tree/linux/slab.h
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -7,6 +7,9 @@
 #define SLAB_PANIC 2
 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL	/* Objects are reclaimable */

+void *kmalloc(size_t size, gfp_t);
+void kfree(void *);
+
 struct kmem_cache {
 	int size;
 	void (*ctor)(void *);
--
cgit v1.2.3
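The reason these shims route through nr_allocated is leak detection.  A
sketch of how a test might exploit that (the test body is hypothetical; the
counter and the helpers are from the patch above, and uatomic_read() is the
liburcu accessor the suite already links against):

#include <assert.h>
#include <urcu/uatomic.h>
#include <linux/slab.h>

extern int nr_allocated;	/* maintained by the kmalloc()/kfree() shims */

/* Hypothetical leak check: allocations and frees must balance. */
static void leak_check_example(void)
{
	int before = uatomic_read(&nr_allocated);
	void *p = kmalloc(64, GFP_KERNEL);

	kfree(p);
	assert(uatomic_read(&nr_allocated) == before);
}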
From bbe9d71f2c545398987a6fea5090a6ca76f4a8dc Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 14 Dec 2016 15:09:28 -0800
Subject: radix tree test suite: cache recently freed objects

The kmem_cache_alloc implementation simply allocates new memory from
malloc() and calls the ctor, which zeroes out the entire object.  This
means it cannot spot bugs where the object isn't properly reinitialised
before being freed.

Add a small (11 objects) cache before freeing objects back to malloc.
This is enough to let us write a test to catch it, although the memory
allocator is now aware of the structure of the radix tree node, since it
chains free objects through ->private_data (like the percpu cache does).

Link: http://lkml.kernel.org/r/1481667692-14500-2-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox
Cc: Kirill A. Shutemov
Cc: Konstantin Khlebnikov
Cc: Ross Zwisler
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 tools/testing/radix-tree/linux.c      | 48 ++++++++++++++++++++++++++++++-----
 tools/testing/radix-tree/linux/slab.h |  5 ----
 2 files changed, 41 insertions(+), 12 deletions(-)

diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index ff0452e8a0c4..d31ea7c9abec 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -1,16 +1,27 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
 #include
+#include
 #include

 int nr_allocated;
 int preempt_count;

+struct kmem_cache {
+	pthread_mutex_t lock;
+	int size;
+	int nr_objs;
+	void *objs;
+	void (*ctor)(void *);
+};
+
 void *mempool_alloc(mempool_t *pool, int gfp_mask)
 {
 	return pool->alloc(gfp_mask, pool->data);
@@ -34,24 +45,44 @@ mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,

 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 {
-	void *ret;
+	struct radix_tree_node *node;

 	if (flags & __GFP_NOWARN)
 		return NULL;

-	ret = malloc(cachep->size);
-	if (cachep->ctor)
-		cachep->ctor(ret);
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs) {
+		cachep->nr_objs--;
+		node = cachep->objs;
+		cachep->objs = node->private_data;
+		pthread_mutex_unlock(&cachep->lock);
+		node->private_data = NULL;
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		node = malloc(cachep->size);
+		if (cachep->ctor)
+			cachep->ctor(node);
+	}
+
 	uatomic_inc(&nr_allocated);
-	return ret;
+	return node;
 }

 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
-	memset(objp, 0, cachep->size);
-	free(objp);
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs > 10) {
+		memset(objp, POISON_FREE, cachep->size);
+		free(objp);
+	} else {
+		struct radix_tree_node *node = objp;
+		cachep->nr_objs++;
+		node->private_data = cachep->objs;
+		cachep->objs = node;
+	}
+	pthread_mutex_unlock(&cachep->lock);
 }

 void *kmalloc(size_t size, gfp_t gfp)
@@ -75,7 +106,10 @@ kmem_cache_create(const char *name, size_t size, size_t offset,
 {
 	struct kmem_cache *ret = malloc(sizeof(*ret));

+	pthread_mutex_init(&ret->lock, NULL);
 	ret->size = size;
+	ret->nr_objs = 0;
+	ret->objs = NULL;
 	ret->ctor = ctor;
 	return ret;
 }

diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
index 446639f78fc1..e40337f41a38 100644
--- a/tools/testing/radix-tree/linux/slab.h
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -10,11 +10,6 @@
 void *kmalloc(size_t size, gfp_t);
 void kfree(void *);

-struct kmem_cache {
-	int size;
-	void (*ctor)(void *);
-};
-
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
--
cgit v1.2.3
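The free objects are chained through a field of the object itself, so the
cache needs no bookkeeping memory of its own.  A standalone toy version of
the same pattern (names invented for illustration; the test suite's real code
is in the diff above):

#include <stdlib.h>

/* Toy free-list cache: chain freed objects through one of their own
 * fields, as the patch does with ->private_data.  Objects handed back
 * from the cache are deliberately NOT reinitialised, which is exactly
 * what lets a test spot stale state. */
struct obj {
	void *next_free;	/* doubles as the freelist link when freed */
	char payload[56];
};

static struct obj *freelist;
static int nr_free;

static void obj_free(struct obj *o)
{
	if (nr_free > 10) {		/* keep at most 11 objects, like above */
		free(o);
		return;
	}
	o->next_free = freelist;
	freelist = o;
	nr_free++;
}

static struct obj *obj_alloc(void)
{
	struct obj *o = freelist;

	if (o) {
		freelist = o->next_free;
		nr_free--;
		o->next_free = NULL;
		return o;		/* stale payload survives on purpose */
	}
	return calloc(1, sizeof(*o));
}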
From e8de4340767dd002978c285e3adddaeda8ac652c Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 14 Dec 2016 15:09:31 -0800
Subject: radix-tree: ensure counts are initialised

radix_tree_join() was freeing nodes with a non-zero ->exceptional count,
and radix_tree_split() wasn't zeroing ->exceptional when it allocated
the new node.

Fix this by making all callers of radix_tree_node_alloc() pass in the
new counts (and some other always-initialised fields), which will
prevent the problem recurring if in future we decide to do something
similar.

Link: http://lkml.kernel.org/r/1481667692-14500-3-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox
Cc: Kirill A. Shutemov
Cc: Konstantin Khlebnikov
Cc: Ross Zwisler
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/radix-tree.c                      | 41 ++++++++++++++++---------------
 tools/testing/radix-tree/multiorder.c | 45 +++++++++++++++++++++++++++++++----
 2 files changed, 61 insertions(+), 25 deletions(-)

diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index d09c17dd60ae..0019aca0f328 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -288,7 +288,10 @@ static void radix_tree_dump(struct radix_tree_root *root)
  * that the caller has pinned this thread of control to the current CPU.
  */
 static struct radix_tree_node *
-radix_tree_node_alloc(struct radix_tree_root *root)
+radix_tree_node_alloc(struct radix_tree_root *root,
+			struct radix_tree_node *parent,
+			unsigned int shift, unsigned int offset,
+			unsigned int count, unsigned int exceptional)
 {
 	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
@@ -333,6 +336,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
+	if (ret) {
+		ret->parent = parent;
+		ret->shift = shift;
+		ret->offset = offset;
+		ret->count = count;
+		ret->exceptional = exceptional;
+	}
 	return ret;
 }

@@ -538,8 +548,8 @@ static int radix_tree_extend(struct radix_tree_root *root,
 		goto out;

 	do {
-		struct radix_tree_node *node = radix_tree_node_alloc(root);
-
+		struct radix_tree_node *node = radix_tree_node_alloc(root,
+							NULL, shift, 0, 1, 0);
 		if (!node)
 			return -ENOMEM;

@@ -550,16 +560,11 @@ static int radix_tree_extend(struct radix_tree_root *root,
 		}

 		BUG_ON(shift > BITS_PER_LONG);
-		node->shift = shift;
-		node->offset = 0;
-		node->count = 1;
-		node->parent = NULL;
 		if (radix_tree_is_internal_node(slot)) {
 			entry_to_node(slot)->parent = node;
-		} else {
+		} else if (radix_tree_exceptional_entry(slot)) {
 			/* Moving an exceptional root->rnode to a node */
-			if (radix_tree_exceptional_entry(slot))
-				node->exceptional = 1;
+			node->exceptional = 1;
 		}
 		node->slots[0] = slot;
 		slot = node_to_entry(node);
@@ -712,14 +717,10 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 		shift -= RADIX_TREE_MAP_SHIFT;
 		if (child == NULL) {
 			/* Have to add a child node. */
-			child = radix_tree_node_alloc(root);
+			child = radix_tree_node_alloc(root, node, shift,
+							offset, 0, 0);
 			if (!child)
 				return -ENOMEM;
-			child->shift = shift;
-			child->offset = offset;
-			child->count = 0;
-			child->exceptional = 0;
-			child->parent = node;
 			rcu_assign_pointer(*slot, node_to_entry(child));
 			if (node)
 				node->count++;
@@ -1209,13 +1210,11 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,

 	for (;;) {
 		if (node->shift > order) {
-			child = radix_tree_node_alloc(root);
+			child = radix_tree_node_alloc(root, node,
+					node->shift - RADIX_TREE_MAP_SHIFT,
+					offset, 0, 0);
 			if (!child)
 				goto nomem;
-			child->shift = node->shift - RADIX_TREE_MAP_SHIFT;
-			child->offset = offset;
-			child->count = 0;
-			child->parent = node;
 			if (node != parent) {
 				node->count++;
 				node->slots[offset] = node_to_entry(child);

diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 08b4e16dc86f..f79812a5e070 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -355,7 +355,7 @@ void multiorder_tagged_iteration(void)
 	item_kill_tree(&tree);
 }

-static void __multiorder_join(unsigned long index,
+static void multiorder_join1(unsigned long index,
 		unsigned order1, unsigned order2)
 {
 	unsigned long loc;
@@ -373,7 +373,7 @@ static void __multiorder_join(unsigned long index,
 	item_kill_tree(&tree);
 }

-static void __multiorder_join2(unsigned order1, unsigned order2)
+static void multiorder_join2(unsigned order1, unsigned order2)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	struct radix_tree_node *node;
@@ -393,6 +393,39 @@ static void __multiorder_join2(unsigned order1, unsigned order2)
 	item_kill_tree(&tree);
 }

+/*
+ * This test revealed an accounting bug for exceptional entries at one point.
+ * Nodes were being freed back into the pool with an elevated exception count
+ * by radix_tree_join() and then radix_tree_split() was failing to zero the
+ * count of exceptional entries.
+ */
+static void multiorder_join3(unsigned int order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void **slot;
+	struct radix_tree_iter iter;
+	unsigned long i;
+
+	for (i = 0; i < (1 << order); i++) {
+		radix_tree_insert(&tree, i, (void *)0x12UL);
+	}
+
+	radix_tree_join(&tree, 0, order, (void *)0x16UL);
+	rcu_barrier();
+
+	radix_tree_split(&tree, 0, 0);
+
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
+	}
+
+	__radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(node->exceptional == node->count);
+
+	item_kill_tree(&tree);
+}
+
 static void multiorder_join(void)
 {
 	int i, j, idx;

 	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
 		for (i = 1; i < 15; i++) {
 			for (j = 0; j < i; j++) {
-				__multiorder_join(idx, i, j);
+				multiorder_join1(idx, i, j);
 			}
 		}
 	}

 	for (i = 1; i < 15; i++) {
 		for (j = 0; j < i; j++) {
-			__multiorder_join2(i, j);
+			multiorder_join2(i, j);
 		}
 	}
+
+	for (i = 3; i < 10; i++) {
+		multiorder_join3(i);
+	}
 }

 static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
--
cgit v1.2.3
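The fix is an instance of a general pattern: make the allocator take every
always-initialised field as a parameter, so no caller can forget to reset one
(the bug here was a stale ->exceptional count surviving a round trip through
the free pool).  A toy version of the pattern, with invented names:

#include <stdlib.h>

/* Illustration only: all invariant fields are set in one place, at
 * allocation, instead of being scattered across every call site. */
struct node {
	struct node *parent;
	unsigned int shift, offset, count, exceptional;
};

static struct node *node_alloc(struct node *parent, unsigned int shift,
			       unsigned int offset, unsigned int count,
			       unsigned int exceptional)
{
	struct node *n = malloc(sizeof(*n));

	if (n) {
		n->parent = parent;
		n->shift = shift;
		n->offset = offset;
		n->count = count;
		n->exceptional = exceptional;
	}
	return n;
}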
Link: http://lkml.kernel.org/r/1481667692-14500-4-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox
Cc: Kirill A. Shutemov
Cc: Konstantin Khlebnikov
Cc: Ross Zwisler
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 tools/testing/radix-tree/tag_check.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index ed5f87d1dce2..fd98c132207a 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -324,6 +324,9 @@ static void single_check(void)
 	assert(ret == 1);
 	ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 1);
 	assert(ret == 1);
+	item_tag_clear(&tree, 0, 0);
+	ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 0);
+	assert(ret == 0);
 	item_kill_tree(&tree);
 }
--
cgit v1.2.3

From e1e14ab8411df344a17687821f8f78f0a1e73cbb Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 14 Dec 2016 15:09:36 -0800
Subject: radix tree test suite: delete unused rcupdate.c

This file was used to implement call_rcu() before liburcu implemented
that function.  It hasn't even been compiled since before the test
suite was added to the kernel.  Remove it to reduce confusion.

Link: http://lkml.kernel.org/r/1481667692-14500-5-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox
Cc: Kirill A. Shutemov
Cc: Konstantin Khlebnikov
Cc: Ross Zwisler
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 tools/testing/radix-tree/rcupdate.c | 86 -------------------------------------
 1 file changed, 86 deletions(-)
 delete mode 100644 tools/testing/radix-tree/rcupdate.c

diff --git a/tools/testing/radix-tree/rcupdate.c b/tools/testing/radix-tree/rcupdate.c
deleted file mode 100644
index 31a2d14225d6..000000000000
--- a/tools/testing/radix-tree/rcupdate.c
+++ /dev/null
@@ -1,86 +0,0 @@
-#include
-#include
-#include
-#include
-
-static pthread_mutex_t rculock = PTHREAD_MUTEX_INITIALIZER;
-static struct rcu_head *rcuhead_global = NULL;
-static __thread int nr_rcuhead = 0;
-static __thread struct rcu_head *rcuhead = NULL;
-static __thread struct rcu_head *rcutail = NULL;
-
-static pthread_cond_t rcu_worker_cond = PTHREAD_COND_INITIALIZER;
-
-/* switch to urcu implementation when it is merged. */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))
-{
-	head->func = func;
-	head->next = rcuhead;
-	rcuhead = head;
-	if (!rcutail)
-		rcutail = head;
-	nr_rcuhead++;
-	if (nr_rcuhead >= 1000) {
-		int signal = 0;
-
-		pthread_mutex_lock(&rculock);
-		if (!rcuhead_global)
-			signal = 1;
-		rcutail->next = rcuhead_global;
-		rcuhead_global = head;
-		pthread_mutex_unlock(&rculock);
-
-		nr_rcuhead = 0;
-		rcuhead = NULL;
-		rcutail = NULL;
-
-		if (signal) {
-			pthread_cond_signal(&rcu_worker_cond);
-		}
-	}
-}
-
-static void *rcu_worker(void *arg)
-{
-	struct rcu_head *r;
-
-	rcupdate_thread_init();
-
-	while (1) {
-		pthread_mutex_lock(&rculock);
-		while (!rcuhead_global) {
-			pthread_cond_wait(&rcu_worker_cond, &rculock);
-		}
-		r = rcuhead_global;
-		rcuhead_global = NULL;
-
-		pthread_mutex_unlock(&rculock);
-
-		synchronize_rcu();
-
-		while (r) {
-			struct rcu_head *tmp = r->next;
-			r->func(r);
-			r = tmp;
-		}
-	}
-
-	rcupdate_thread_exit();
-
-	return NULL;
-}
-
-static pthread_t worker_thread;
-void rcupdate_init(void)
-{
-	pthread_create(&worker_thread, NULL, rcu_worker, NULL);
-}
-
-void rcupdate_thread_init(void)
-{
-	rcu_register_thread();
-}
-void rcupdate_thread_exit(void)
-{
-	rcu_unregister_thread();
-}
--
cgit v1.2.3
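For reference, a minimal userspace sketch (mine, not from the series) of the
liburcu API that made the deleted file redundant.  It assumes liburcu is
installed and the program is linked with -lurcu; the struct and callback
names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* liburcu's default (memb) flavour */
#include <urcu/compiler.h>	/* caa_container_of() */

struct item {
	int value;
	struct rcu_head rcu;	/* embedded callback head, kernel-style */
};

static void free_item(struct rcu_head *head)
{
	free(caa_container_of(head, struct item, rcu));
}

int main(void)
{
	struct item *p = malloc(sizeof(*p));

	rcu_register_thread();		/* every thread using RCU registers */
	call_rcu(&p->rcu, free_item);	/* freed after a grace period */
	rcu_barrier();			/* wait for callbacks, for the demo */
	rcu_unregister_thread();
	return 0;
}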