author      Waiman Long <longman@redhat.com>    2019-01-30 00:53:46 +0300
committer   Ingo Molnar <mingo@kernel.org>      2019-02-04 11:03:30 +0300
commit      412f34a82ccf7dd52f6b197f6450a33f03342523
tree        6b913e5cee60855278cefa0bb954531f40f07481
parent      d682b596d99345ef0000e7017db714ba7f29e017
locking/qspinlock_stat: Track the no MCS node available case
Track the number of slowpath locking operations that are performed
without any MCS node available, and rename lock_index[123] to make
the counter names more descriptive.

Checking these stat counters is one way to find out whether a given
code path is actually being exercised.
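
As a usage sketch (not part of this patch): with CONFIG_QUEUED_LOCK_STAT=y
the counters are exposed as debugfs files, so the new lock_no_node count
can be read from userspace. The program below is illustrative only and
assumes debugfs is mounted at /sys/kernel/debug with the qlockstat
directory documented in qspinlock_stat.h:

/*
 * Illustrative sketch, not part of the patch: read the lock_no_node
 * counter.  Assumes CONFIG_QUEUED_LOCK_STAT=y and debugfs mounted
 * at /sys/kernel/debug.
 */
#include <stdio.h>

int main(void)
{
	unsigned long val;
	FILE *f = fopen("/sys/kernel/debug/qlockstat/lock_no_node", "r");

	if (!f) {
		perror("lock_no_node");
		return 1;
	}
	if (fscanf(f, "%lu", &val) == 1)
		printf("lock_no_node = %lu\n", val);
	fclose(f);
	return 0;
}

A value that stays non-zero across a stress run is evidence that the
no-MCS-node fallback path is actually being hit.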
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: James Morse <james.morse@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SRINIVAS <srinivas.eeda@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Link: https://lkml.kernel.org/r/1548798828-16156-3-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   kernel/locking/qspinlock.c       |  3
-rw-r--r--   kernel/locking/qspinlock_stat.h  | 21
2 files changed, 17 insertions, 7 deletions
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 0875053c4050..21ee51b47961 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -422,6 +422,7 @@ pv_queue:
 	 * simple enough.
 	 */
 	if (unlikely(idx >= MAX_NODES)) {
+		qstat_inc(qstat_lock_no_node, true);
 		while (!queued_spin_trylock(lock))
 			cpu_relax();
 		goto release;
@@ -432,7 +433,7 @@ pv_queue:
 	/*
 	 * Keep counts of non-zero index values:
 	 */
-	qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+	qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
 
 	/*
 	 * Ensure that we increment the head node->count before initialising
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 42d3d8dc8f49..d73f85388d5c 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -30,6 +30,13 @@
  *   pv_wait_node	- # of vCPU wait's at a non-head queue node
  *   lock_pending	- # of locking operations via pending code
  *   lock_slowpath	- # of locking operations via MCS lock queue
+ *   lock_use_node2	- # of locking operations that use 2nd per-CPU node
+ *   lock_use_node3	- # of locking operations that use 3rd per-CPU node
+ *   lock_use_node4	- # of locking operations that use 4th per-CPU node
+ *   lock_no_node	- # of locking operations without using per-CPU node
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
  *
  * Writing to the "reset_counters" file will reset all the above counter
  * values.
@@ -55,9 +62,10 @@ enum qlock_stats {
 	qstat_pv_wait_node,
 	qstat_lock_pending,
 	qstat_lock_slowpath,
-	qstat_lock_idx1,
-	qstat_lock_idx2,
-	qstat_lock_idx3,
+	qstat_lock_use_node2,
+	qstat_lock_use_node3,
+	qstat_lock_use_node4,
+	qstat_lock_no_node,
 	qstat_num,	/* Total number of statistical counters */
 	qstat_reset_cnts = qstat_num,
 };
@@ -85,9 +93,10 @@ static const char * const qstat_names[qstat_num + 1] = {
 	[qstat_pv_wait_node]	= "pv_wait_node",
 	[qstat_lock_pending]	= "lock_pending",
 	[qstat_lock_slowpath]	= "lock_slowpath",
-	[qstat_lock_idx1]	= "lock_index1",
-	[qstat_lock_idx2]	= "lock_index2",
-	[qstat_lock_idx3]	= "lock_index3",
+	[qstat_lock_use_node2]	= "lock_use_node2",
+	[qstat_lock_use_node3]	= "lock_use_node3",
+	[qstat_lock_use_node4]	= "lock_use_node4",
+	[qstat_lock_no_node]	= "lock_no_node",
 	[qstat_reset_cnts]	= "reset_counters",
 };
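
To make the enum arithmetic in the qspinlock.c hunk concrete, here is a
minimal standalone sketch (a hypothetical stand-in for the kernel's
per-CPU counters and the qstat_inc() macro, not kernel code). idx 0, the
common first-node case, increments nothing because the condition is
false; idx 1..3 select the lock_use_node2..lock_use_node4 counters, and
the first-node count falls out by subtraction exactly as the new header
comment describes:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, abridged mirror of the kernel enum; illustration only. */
enum qlock_stats {
	qstat_lock_slowpath,
	qstat_lock_use_node2,
	qstat_lock_use_node3,
	qstat_lock_use_node4,
	qstat_lock_no_node,
	qstat_num,
};

static unsigned long counters[qstat_num];

/* Simplified stand-in for the kernel's qstat_inc(stat, cond):
 * bump the counter only when the condition is non-zero. */
#define qstat_inc(stat, cond)	do { if (cond) counters[stat]++; } while (0)

int main(void)
{
	/* One slowpath acquisition at each nesting level idx = 0..3. */
	for (int idx = 0; idx < 4; idx++) {
		qstat_inc(qstat_lock_slowpath, true);
		/* idx == 0 bumps nothing; idx 1..3 map onto the
		 * node2..node4 counters via the + idx - 1 offset. */
		qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
	}

	/* lock_use_node1 is derived, per the new header comment:
	 * lock_slowpath - lock_use_node[234]. */
	unsigned long node1 = counters[qstat_lock_slowpath]
			    - counters[qstat_lock_use_node2]
			    - counters[qstat_lock_use_node3]
			    - counters[qstat_lock_use_node4];

	printf("node1=%lu node2=%lu node3=%lu node4=%lu\n", node1,
	       counters[qstat_lock_use_node2],
	       counters[qstat_lock_use_node3],
	       counters[qstat_lock_use_node4]);
	return 0;	/* prints: node1=1 node2=1 node3=1 node4=1 */
}

The design choice this mirrors: rather than spend a counter on the
overwhelmingly common idx == 0 case, that count is derived by
subtraction, leaving a single conditional per-CPU increment on the
slowpath.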