author     Linus Torvalds <torvalds@linux-foundation.org>  2019-11-27 22:42:01 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-11-27 22:42:01 +0300
commit     95f1fa9e3418d50ce099e67280b5497b9c93843b (patch)
tree       b8617e471a9b9993ac1bad48e2d954335f8a6232 /lib
parent     477093b3e144aa0ece07a5fd2a84013d037e2776 (diff)
parent     16c0f03f629a89e6a1249497202b2c154ff46206 (diff)
download   linux-95f1fa9e3418d50ce099e67280b5497b9c93843b.tar.xz
Merge tag 'trace-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"New tracing features:
- New PERMANENT flag to ftrace_ops when attaching a callback to a
  function.
  Setting /proc/sys/kernel/ftrace_enabled to zero disables all
  callbacks attached in ftrace, which is detrimental to live kernel
  patching, as it disables everything that live patching has patched.
  If a ftrace_ops is registered to ftrace with the PERMANENT flag set,
  it will prevent ftrace_enabled from being disabled, and if
  ftrace_enabled is already disabled, it will prevent a ftrace_ops
  with the PERMANENT flag set from being registered (see the
  registration sketch after the pull message).
- New register_ftrace_direct().
  As eBPF would like to register its own trampolines to be called
  from the ftrace nop locations directly, without going through the
  ftrace trampoline, this function has been added. It allows eBPF
  trampolines to live alongside ftrace, perf, kprobes and live
  patching. It also makes use of the ftrace enabled_functions file,
  which keeps track of functions that have been modified in the
  kernel, to allow for security auditing (a module sketch follows
  the pull message).
- Allow for kernel internal use of ftrace instances.
  Subsystems in the kernel can now create and destroy their own
  tracing instances, which lets them have their own tracing buffer
  and record events without worrying about other users writing over
  their data (a usage sketch follows the pull message).
- New seq_buf_hex_dump() that gives seq_buf users the equivalent of
  print_hex_dump() output (usage sketch below; the implementation is
  in the lib/seq_buf.c hunk at the end of this page).
- Notifications are now sent for tracing_max_latency, so user space
  can learn when a new max latency is hit by one of the latency
  tracers (an inotify sketch follows the pull message).
- Wider use of the generic compare operations (cmp_func_t and
  friends) by bsearch() and sort() (example below; see also the lib/
  diff at the end of this page).
- More synthetic event fields may be defined (32 up from 16)
- Use of an xarray for the system call trace events on architectures
  with sparse system call numbers (a generic sketch of the pattern
  follows the pull message).
This, along with small cleanups and fixes"
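To make the PERMANENT flag concrete, here is a minimal sketch of registering such a callback against the v5.5 ftrace API. The names my_callback and my_ops are illustrative, real callbacks also need recursion protection, and the exact error code returned on rejection is not taken from this page:

    #include <linux/init.h>
    #include <linux/ftrace.h>

    /* Called at every traced function entry (v5.5 callback signature). */
    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *ops, struct pt_regs *regs)
    {
            /* illustrative placeholder */
    }

    static struct ftrace_ops my_ops = {
            .func  = my_callback,
            /* PERMANENT pins ftrace_enabled at 1 while registered, and
             * registration is rejected if ftrace_enabled is already 0. */
            .flags = FTRACE_OPS_FL_PERMANENT,
    };

    static int __init my_init(void)
    {
            return register_ftrace_function(&my_ops);
    }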
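For register_ftrace_direct(), a sketch in the spirit of the samples/ftrace/ modules added by this pull. my_tramp stands in for an arch-specific assembly trampoline (it must preserve argument registers), and wake_up_process() is just an example attach point:

    #include <linux/module.h>
    #include <linux/sched.h>
    #include <linux/ftrace.h>

    extern void my_tramp(void);     /* assumed asm trampoline */

    static int __init my_direct_init(void)
    {
            /* Point wake_up_process()'s fentry nop straight at my_tramp,
             * bypassing the shared ftrace trampoline. */
            return register_ftrace_direct((unsigned long)wake_up_process,
                                          (unsigned long)my_tramp);
    }

    static void __exit my_direct_exit(void)
    {
            unregister_ftrace_direct((unsigned long)wake_up_process,
                                     (unsigned long)my_tramp);
    }

    module_init(my_direct_init);
    module_exit(my_direct_exit);
    MODULE_LICENSE("GPL");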
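For kernel-internal instances, a sketch built on the accessors this series exports (trace_array_get_by_name(), trace_array_set_clr_event(), trace_array_printk(), trace_array_destroy()); the instance and event names here are arbitrary:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/trace.h>

    static struct trace_array *tr;

    static int __init my_instance_init(void)
    {
            /* Create (or look up) a private instance; it also appears
             * under tracefs as instances/my_subsys. */
            tr = trace_array_get_by_name("my_subsys");
            if (!tr)
                    return -ENOMEM;

            /* Enable an existing event in this instance's buffer only. */
            trace_array_set_clr_event(tr, "sched", "sched_switch", true);

            /* Record into the private buffer. */
            trace_array_printk(tr, _THIS_IP_, "hello from my_subsys\n");
            return 0;
    }

    static void __exit my_instance_exit(void)
    {
            /* Drop our reference, then remove the instance, as the
             * sample module in this pull does. */
            trace_array_put(tr);
            trace_array_destroy(tr);
    }

    module_init(my_instance_init);
    module_exit(my_instance_exit);
    MODULE_LICENSE("GPL");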
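seq_buf_hex_dump() itself is in the lib/seq_buf.c hunk at the bottom of this page; a minimal usage sketch (buffer size and prefix string are arbitrary):

    #include <linux/seq_buf.h>

    static void dump_blob(const void *data, size_t len)
    {
            static unsigned char out[512];
            struct seq_buf s;

            seq_buf_init(&s, out, sizeof(out));
            /* 16 bytes per row, 1-byte groups, offset prefix, ASCII column. */
            seq_buf_hex_dump(&s, "blob: ", DUMP_PREFIX_OFFSET, 16, 1,
                             data, len, true);
    }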
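The tracing_max_latency notifications are delivered through fsnotify, so ordinary file watching works from user space. A sketch, assuming tracefs is mounted at /sys/kernel/tracing and with error handling omitted:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    int main(void)
    {
            char buf[4096];
            int fd = inotify_init();

            inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
                              IN_MODIFY);
            /* Each event means a latency tracer recorded a new maximum. */
            while (read(fd, buf, sizeof(buf)) > 0)
                    printf("new max latency recorded\n");
            return 0;
    }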
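The generic compare work shows up in the lib/ diff below: sort(), sort_r() and bsearch() now take the shared cmp_func_t/cmp_r_func_t/swap_func_t typedefs, so one comparator can serve both sorting and searching. An illustrative caller:

    #include <linux/types.h>
    #include <linux/sort.h>
    #include <linux/bsearch.h>

    static int cmp_u32(const void *a, const void *b)
    {
            u32 x = *(const u32 *)a, y = *(const u32 *)b;

            return x < y ? -1 : x > y;
    }

    static u32 *find_val(u32 *vals, size_t n, u32 key)
    {
            /* The same cmp_func_t comparator drives both calls. */
            sort(vals, n, sizeof(*vals), cmp_u32, NULL);
            return bsearch(&key, vals, n, sizeof(*vals), cmp_u32);
    }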
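The xarray change replaces flat syscall-number tables on architectures where the numbers are sparse. A generic sketch of the pattern, with struct my_event and both helpers hypothetical rather than the tracing code's actual names:

    #include <linux/xarray.h>

    struct my_event { int nr; };

    static DEFINE_XARRAY(syscall_events);

    /* The xarray allocates nodes only for populated ranges, unlike an
     * array sized by the largest syscall number. */
    static int add_syscall_event(unsigned long nr, struct my_event *ev)
    {
            return xa_err(xa_store(&syscall_events, nr, ev, GFP_KERNEL));
    }

    static struct my_event *find_syscall_event(unsigned long nr)
    {
            return xa_load(&syscall_events, nr);
    }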
* tag 'trace-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (51 commits)
tracing: Enable syscall optimization for MIPS
tracing: Use xarray for syscall trace events
tracing: Sample module to demonstrate kernel access to Ftrace instances.
tracing: Adding new functions for kernel access to Ftrace instances
tracing: Fix Kconfig indentation
ring-buffer: Fix typos in function ring_buffer_producer
ftrace: Use BIT() macro
ftrace: Return ENOTSUPP when DYNAMIC_FTRACE_WITH_DIRECT_CALLS is not configured
ftrace: Rename ftrace_graph_stub to ftrace_stub_graph
ftrace: Add a helper function to modify_ftrace_direct() to allow arch optimization
ftrace: Add helper find_direct_entry() to consolidate code
ftrace: Add another check for match in register_ftrace_direct()
ftrace: Fix accounting bug with direct->count in register_ftrace_direct()
ftrace/selftests: Fix spelling mistake "wakeing" -> "waking"
tracing: Increase SYNTH_FIELDS_MAX for synthetic_events
ftrace/samples: Add a sample module that implements modify_ftrace_direct()
ftrace: Add modify_ftrace_direct()
tracing: Add missing "inline" in stub function of latency_fsnotify()
tracing: Remove stray tab in TRACE_EVAL_MAP_FILE's help text
tracing: Use seq_buf_hex_dump() to dump buffers
...
Diffstat (limited to 'lib')
-rw-r--r--  lib/bsearch.c   2
-rw-r--r--  lib/seq_buf.c  62
-rw-r--r--  lib/sort.c     15
3 files changed, 68 insertions, 11 deletions
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 8baa83968162..8b3aae5ae77a 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -29,7 +29,7 @@
  * the same comparison function for both sort() and bsearch().
  */
 void *bsearch(const void *key, const void *base, size_t num, size_t size,
-	      int (*cmp)(const void *key, const void *elt))
+	      cmp_func_t cmp)
 {
 	const char *pivot;
 	int result;
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index bd807f545a9d..4e865d42ab03 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -328,3 +328,65 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
 	s->readpos += cnt;
 	return cnt;
 }
+
+/**
+ * seq_buf_hex_dump - print formatted hex dump into the sequence buffer
+ * @s: seq_buf descriptor
+ * @prefix_str: string to prefix each line with;
+ *	caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ *	is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * Function is an analogue of print_hex_dump() and thus has similar interface.
+ *
+ * linebuf size is maximal length for one line.
+ * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
+ *	separating space
+ * 2 - spaces separating hex dump and ascii representation
+ * 32 - ascii representation
+ * 1 - terminating '\0'
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
+		     int rowsize, int groupsize,
+		     const void *buf, size_t len, bool ascii)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+	int ret;
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	for (i = 0; i < len; i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+				   linebuf, sizeof(linebuf), ascii);
+
+		switch (prefix_type) {
+		case DUMP_PREFIX_ADDRESS:
+			ret = seq_buf_printf(s, "%s%p: %s\n",
+					     prefix_str, ptr + i, linebuf);
+			break;
+		case DUMP_PREFIX_OFFSET:
+			ret = seq_buf_printf(s, "%s%.8x: %s\n",
+					     prefix_str, i, linebuf);
+			break;
+		default:
+			ret = seq_buf_printf(s, "%s%s\n", prefix_str, linebuf);
+			break;
+		}
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
diff --git a/lib/sort.c b/lib/sort.c
index d54cf97e9548..3ad454411997 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -117,8 +117,6 @@ static void swap_bytes(void *a, void *b, size_t n)
 	} while (n);
 }
 
-typedef void (*swap_func_t)(void *a, void *b, int size);
-
 /*
  * The values are arbitrary as long as they can't be confused with
  * a pointer, but small integers make for the smallest compare
@@ -144,12 +142,9 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
 		swap_func(a, b, (int)size);
 }
 
-typedef int (*cmp_func_t)(const void *, const void *);
-typedef int (*cmp_r_func_t)(const void *, const void *, const void *);
 #define _CMP_WRAPPER ((cmp_r_func_t)0L)
 
-static int do_cmp(const void *a, const void *b,
-		  cmp_r_func_t cmp, const void *priv)
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
 {
 	if (cmp == _CMP_WRAPPER)
 		return ((cmp_func_t)(priv))(a, b);
@@ -202,8 +197,8 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
  * it less suitable for kernel use.
  */
 void sort_r(void *base, size_t num, size_t size,
-	    int (*cmp_func)(const void *, const void *, const void *),
-	    void (*swap_func)(void *, void *, int size),
+	    cmp_r_func_t cmp_func,
+	    swap_func_t swap_func,
 	    const void *priv)
 {
 	/* pre-scale counters for performance */
@@ -269,8 +264,8 @@ void sort_r(void *base, size_t num, size_t size,
 EXPORT_SYMBOL(sort_r);
 
 void sort(void *base, size_t num, size_t size,
-	  int (*cmp_func)(const void *, const void *),
-	  void (*swap_func)(void *, void *, int size))
+	  cmp_func_t cmp_func,
+	  swap_func_t swap_func)
 {
 	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
 }