author:    Paul E. McKenney <paulmck@kernel.org>  2020-07-01 23:49:06 +0300
committer: Paul E. McKenney <paulmck@kernel.org>  2020-08-25 04:38:34 +0300
commit:    34e8c4837adb579962e528a4f7dd1f75cb120be4 (patch)
tree:      96a8b4d5ff899adf81d03cdb699148123c8f828f /kernel/scftorture.c
parent:    980205ee8489d53c4380f7762debac87312b0fb3 (diff)
download:  linux-34e8c4837adb579962e528a4f7dd1f75cb120be4.tar.xz
scftorture: Add smp_call_function() memory-ordering checks
This commit adds checks for memory misordering across calls to and
returns from smp_call_function() in the case where the caller waits.
Misordering results in a splat.
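Concretely, the waited-case check is a flag handshake through a heap-allocated scf_check structure: the caller stores scfc_in before the cross-CPU call and expects the handler to observe it, and the handler stores scfc_out and expects the waiting caller to observe it after the call returns. The sketch below is illustrative only; the field names, n_mb_out_errs, and n_alloc_errs come from the diff further down, while the handler body, the n_mb_in_errs counter, and the check_one_broadcast() wrapper are assumptions about the surrounding scftorture code rather than part of this patch.

```c
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/smp.h>

struct scf_check {
	bool scfc_in;		/* Stored by the caller before the IPIs are sent. */
	bool scfc_out;		/* Stored by the handler, checked by the waiting caller. */
	int scfc_cpu;		/* Target CPU, or -1 for a broadcast. */
	bool scfc_wait;		/* Caller waits, so the handler must not kfree(). */
};

static atomic_t n_mb_in_errs;	/* Assumed counter for handler-side misordering. */
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;

/* Assumed shape of the handler side: verify the caller's pre-call store,
 * then publish a store of our own for the caller to verify after the wait.
 */
static void scf_handler(void *arg)
{
	struct scf_check *scfcp = arg;

	if (!scfcp)
		return;
	if (WARN_ON_ONCE(!READ_ONCE(scfcp->scfc_in)))
		atomic_inc(&n_mb_in_errs);	/* Caller's store not visible here. */
	WRITE_ONCE(scfcp->scfc_out, true);	/* Must be visible to the waiting caller. */
}

/* Hypothetical wrapper mirroring the SCF_PRIM_ALL hunk in the diff below. */
static void check_one_broadcast(void)
{
	struct scf_check *scfcp;

	preempt_disable();			/* smp_call_function() requires this. */
	scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
	if (WARN_ON_ONCE(!scfcp)) {
		atomic_inc(&n_alloc_errs);
	} else {
		scfcp->scfc_cpu = -1;
		scfcp->scfc_wait = true;
		scfcp->scfc_out = false;
		scfcp->scfc_in = true;		/* Handlers should observe this store. */
	}
	smp_call_function(scf_handler, scfcp, 1);
	if (scfcp) {
		if (WARN_ON_ONCE(!scfcp->scfc_out))
			atomic_inc(&n_mb_out_errs);	/* Leak rather than trash! */
		else
			kfree(scfcp);
	}
	preempt_enable();
}
```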
Note that in contrast to smp_call_function_single(), this code does not
test memory ordering into the handler in the no-wait case because none
of the handlers would be able to free the scf_check structure without
introducing heavy synchronization to work out which was last.
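For illustration only, the kind of heavy synchronization being avoided would look roughly like the hypothetical sketch below, in which each of the handlers decrements a shared reference count and the last one to finish frees the structure; none of these identifiers exist in scftorture.c.

```c
#include <linux/atomic.h>
#include <linux/slab.h>

/* Hypothetical no-wait variant that this patch deliberately does not add. */
struct scf_check_nowait {
	atomic_t scfc_refs;	/* Caller sets this to the number of target CPUs. */
	bool scfc_in;
};

static void scf_handler_nowait(void *arg)
{
	struct scf_check_nowait *scfcp = arg;

	WARN_ON_ONCE(!READ_ONCE(scfcp->scfc_in));
	if (atomic_dec_and_test(&scfcp->scfc_refs))
		kfree(scfcp);	/* This handler happened to run last. */
}
```

Besides the cost of an atomic read-modify-write in every handler, atomic_dec_and_test() is a fully ordered operation, so it could itself hide the very misordering the test is trying to detect.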
[ paulmck: s/GFP_KERNEL/GFP_ATOMIC/ per kernel test robot feedback. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'kernel/scftorture.c')
-rw-r--r--  kernel/scftorture.c | 25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 3519ad1b3278..0d7299d32dd0 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -297,11 +297,13 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 		cpus_read_lock();
 	else
 		preempt_disable();
-	switch (scfsp->scfs_prim) {
-	case SCF_PRIM_SINGLE:
+	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
 		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
 		if (WARN_ON_ONCE(!scfcp))
 			atomic_inc(&n_alloc_errs);
+	}
+	switch (scfsp->scfs_prim) {
+	case SCF_PRIM_SINGLE:
 		cpu = torture_random(trsp) % nr_cpu_ids;
 		if (scfsp->scfs_wait)
 			scfp->n_single_wait++;
@@ -328,11 +330,6 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 		}
 		break;
 	case SCF_PRIM_MANY:
-		if (scfsp->scfs_wait) {
-			scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
-			if (WARN_ON_ONCE(!scfcp))
-				atomic_inc(&n_alloc_errs);
-		}
 		if (scfsp->scfs_wait)
 			scfp->n_many_wait++;
 		else
@@ -356,7 +353,19 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_all_wait++;
 		else
 			scfp->n_all++;
-		smp_call_function(scf_handler, NULL, scfsp->scfs_wait);
+		if (scfcp) {
+			scfcp->scfc_cpu = -1;
+			scfcp->scfc_wait = true;
+			scfcp->scfc_out = false;
+			scfcp->scfc_in = true;
+		}
+		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
+		if (scfcp) {
+			if (WARN_ON_ONCE(!scfcp->scfc_out))
+				atomic_inc(&n_mb_out_errs);  // Leak rather than trash!
+			else
+				kfree(scfcp);
+		}
 		break;
 	}
 	if (use_cpus_read_lock)
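Two caller-side details are worth noting. The allocation uses GFP_ATOMIC presumably because it now sits after preempt_disable() (or cpus_read_lock()), where a sleeping GFP_KERNEL allocation is not allowed; this is the change recorded in the bracketed note above. And when the handler's scfc_out store is not observed, the structure is intentionally leaked rather than freed ("Leak rather than trash!"), since kfree()ing memory that a late or misordered handler might still write would turn a detected ordering bug into memory corruption.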