author	Christian Borntraeger <borntraeger@linux.ibm.com>	2022-05-30 12:27:05 +0300
committer	Heiko Carstens <hca@linux.ibm.com>	2022-06-01 13:03:16 +0300
commit	6d5946274df1fff539a7eece458a43be733d1db8 (patch)
tree	e7d3215f840d3b1ffad53ba7ff186abe542ff12e /arch/s390/mm/gmap.c
parent	315945a43e91c81ebfab0ef011d1dcab419c6d9b (diff)
download	linux-6d5946274df1fff539a7eece458a43be733d1db8.tar.xz
s390/gmap: voluntarily schedule during key setting
With large guests, or many guests, that use storage keys it is possible to create large latencies or stalls during initial key setting:

rcu: INFO: rcu_sched self-detected stall on CPU
rcu: 18-....: (2099 ticks this GP) idle=54e/1/0x4000000000000002 softirq=35598716/35598716 fqs=998
 (t=2100 jiffies g=155867385 q=20879)
Task dump for CPU 18:
CPU 1/KVM       R  running task        0 1030947 256019 0x06000004
Call Trace:
 sched_show_task
 rcu_dump_cpu_stacks
 rcu_sched_clock_irq
 update_process_times
 tick_sched_handle
 tick_sched_timer
 __hrtimer_run_queues
 hrtimer_interrupt
 do_IRQ
 ext_int_handler
 ptep_zap_key

The mmap lock is held during the page walk, but since it is a semaphore, scheduling is still possible. The same holds for the kvm srcu. To minimize overhead, reschedule voluntarily only once per segment table entry or large page rather than per page.

Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Link: https://lore.kernel.org/r/20220530092706.11637-2-borntraeger@linux.ibm.com
Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
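Editor's note on the locking: the reschedule points added below are only legal because the walk runs in sleepable context; the caller takes the mmap lock (a sleepable rwsem) around the page walk, and the KVM side holds SRCU, which also permits sleeping. The sketch below shows only that locking shape around walk_page_range(); it is a simplified illustration, not the actual s390_enable_skey() body, and example_enable_skey() is a made-up name.

#include <linux/mm.h>
#include <linux/pagewalk.h>
#include <linux/sched.h>

/*
 * Illustrative sketch only (not the real s390_enable_skey()): drive a
 * page walk over the whole address range while holding the mmap lock
 * as a writer. Because mmap_write_lock() takes a sleepable rwsem, the
 * cond_resched() calls in the walk callbacks may really schedule.
 */
static int example_enable_skey(struct mm_struct *mm,
			       const struct mm_walk_ops *ops)
{
	int rc;

	mmap_write_lock(mm);	/* rwsem: sleeping is allowed while held */
	rc = walk_page_range(mm, 0, TASK_SIZE, ops, NULL);
	mmap_write_unlock(mm);
	return rc;
}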
Diffstat (limited to 'arch/s390/mm/gmap.c')
-rw-r--r--	arch/s390/mm/gmap.c	14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index af03cacf34ec..f3694f6df8b7 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2601,6 +2601,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+/*
+ * Give a chance to schedule after setting a key to 256 pages.
+ * We only hold the mm lock, which is a rwsem and the kvm srcu.
+ * Both can sleep.
+ */
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+				  unsigned long next, struct mm_walk *walk)
+{
+	cond_resched();
+	return 0;
+}
+
 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 				      unsigned long hmask, unsigned long next,
 				      struct mm_walk *walk)
@@ -2623,12 +2635,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 	end = start + HPAGE_SIZE - 1;
 	__storage_key_init_range(start, end);
 	set_bit(PG_arch_1, &page->flags);
+	cond_resched();
 	return 0;
 }
 
 static const struct mm_walk_ops enable_skey_walk_ops = {
 	.hugetlb_entry = __s390_enable_skey_hugetlb,
 	.pte_entry = __s390_enable_skey_pte,
+	.pmd_entry = __s390_enable_skey_pmd,
 };
 
 int s390_enable_skey(void)
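Editor's note on the granularity: a pmd (segment table entry) on s390 maps 1 MB, i.e. 256 4 KB pages, which is where the "256 pages" in the comment comes from. The walker calls the pmd_entry callback once per segment and, because __s390_enable_skey_pmd() returns 0 without claiming the entry, it then still invokes pte_entry for every page underneath. The snippet below is a heavily simplified sketch of that dispatch order, not the actual mm/pagewalk.c code, and sketch_walk_one_pmd() is a made-up name.

/*
 * Simplified sketch of the per-pmd dispatch in the page walker (the
 * real logic in mm/pagewalk.c also handles splitting, holes and
 * walk->action): pmd_entry runs once per segment, then pte_entry runs
 * for each pte below it. So the cond_resched() added in the patch
 * yields at most once per 256 key-setting operations.
 */
static int sketch_walk_one_pmd(pmd_t *pmd, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (ops->pmd_entry)
		err = ops->pmd_entry(pmd, addr, next, walk);	/* cond_resched() happens here */
	if (err || !ops->pte_entry)
		return err;

	/* ... the real walker now visits every pte under this pmd ... */
	return err;
}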