diff options
Diffstat (limited to 'tools/lib')
| -rw-r--r-- | tools/lib/bpf/libbpf.c                   | 21 |
| -rw-r--r-- | tools/lib/perf/cpumap.c                  | 49 |
| -rw-r--r-- | tools/lib/perf/evsel.c                   | 10 |
| -rw-r--r-- | tools/lib/perf/include/internal/cpumap.h |  6 |
| -rw-r--r-- | tools/lib/perf/include/perf/cpumap.h     |  4 |
| -rw-r--r-- | tools/lib/python/kdoc/kdoc_parser.py     |  5 |
6 files changed, 50 insertions(+), 45 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 8b0c3246097f..3a80a018fc7d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5852,11 +5852,12 @@ static int load_module_btfs(struct bpf_object *obj) info.name = ptr_to_u64(name); info.name_len = sizeof(name); + btf = NULL; err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { err = -errno; pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err)); - goto err_out; + break; } /* ignore non-module BTFs */ @@ -5870,15 +5871,15 @@ static int load_module_btfs(struct bpf_object *obj) if (err) { pr_warn("failed to load module [%s]'s BTF object #%d: %s\n", name, id, errstr(err)); - goto err_out; + break; } err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap, sizeof(*obj->btf_modules), obj->btf_module_cnt + 1); if (err) - goto err_out; + break; - mod_btf = &obj->btf_modules[obj->btf_module_cnt++]; + mod_btf = &obj->btf_modules[obj->btf_module_cnt]; mod_btf->btf = btf; mod_btf->id = id; @@ -5886,16 +5887,16 @@ static int load_module_btfs(struct bpf_object *obj) mod_btf->name = strdup(name); if (!mod_btf->name) { err = -ENOMEM; - goto err_out; + break; } - continue; + obj->btf_module_cnt++; + } -err_out: + if (err) { + btf__free(btf); close(fd); - return err; } - - return 0; + return err; } static struct bpf_core_cand_list * diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index 4160e7d2e120..e51b0490ad57 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -15,12 +15,12 @@ #define MAX_NR_CPUS 4096 -void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) +void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus) { RC_CHK_ACCESS(map)->nr = nr_cpus; } -struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus) +struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus) { RC_STRUCT(perf_cpu_map) *cpus; struct perf_cpu_map *result; @@ -78,7 +78,7 @@ void perf_cpu_map__put(struct perf_cpu_map *map) static 
struct perf_cpu_map *cpu_map__new_sysconf(void) { struct perf_cpu_map *cpus; - int nr_cpus, nr_cpus_conf; + long nr_cpus, nr_cpus_conf; nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); if (nr_cpus < 0) @@ -86,15 +86,13 @@ static struct perf_cpu_map *cpu_map__new_sysconf(void) nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF); if (nr_cpus != nr_cpus_conf) { - pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.", + pr_warning("Number of online CPUs (%ld) differs from the number configured (%ld) the CPU map will only cover the first %ld CPUs.", nr_cpus, nr_cpus_conf, nr_cpus); } cpus = perf_cpu_map__alloc(nr_cpus); if (cpus != NULL) { - int i; - - for (i = 0; i < nr_cpus; ++i) + for (long i = 0; i < nr_cpus; ++i) RC_CHK_ACCESS(cpus)->map[i].cpu = i; } @@ -132,23 +130,23 @@ static int cmp_cpu(const void *a, const void *b) return cpu_a->cpu - cpu_b->cpu; } -static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx) +static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx) { return RC_CHK_ACCESS(cpus)->map[idx]; } -static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus) +static struct perf_cpu_map *cpu_map__trim_new(unsigned int nr_cpus, const struct perf_cpu *tmp_cpus) { size_t payload_size = nr_cpus * sizeof(struct perf_cpu); struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus); - int i, j; if (cpus != NULL) { + unsigned int j = 0; + memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size); qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu); /* Remove dups */ - j = 0; - for (i = 0; i < nr_cpus; i++) { + for (unsigned int i = 0; i < nr_cpus; i++) { if (i == 0 || __perf_cpu_map__cpu(cpus, i).cpu != __perf_cpu_map__cpu(cpus, i - 1).cpu) { @@ -167,9 +165,8 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) struct perf_cpu_map *cpus = NULL; unsigned long start_cpu, end_cpu = 
0; char *p = NULL; - int i, nr_cpus = 0; + unsigned int nr_cpus = 0, max_entries = 0; struct perf_cpu *tmp_cpus = NULL, *tmp; - int max_entries = 0; if (!cpu_list) return perf_cpu_map__new_online_cpus(); @@ -208,9 +205,10 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) for (; start_cpu <= end_cpu; start_cpu++) { /* check for duplicates */ - for (i = 0; i < nr_cpus; i++) + for (unsigned int i = 0; i < nr_cpus; i++) { if (tmp_cpus[i].cpu == (int16_t)start_cpu) goto invalid; + } if (nr_cpus == max_entries) { max_entries += max(end_cpu - start_cpu + 1, 16UL); @@ -252,12 +250,12 @@ struct perf_cpu_map *perf_cpu_map__new_int(int cpu) return cpus; } -static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus) +static unsigned int __perf_cpu_map__nr(const struct perf_cpu_map *cpus) { return RC_CHK_ACCESS(cpus)->nr; } -struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx) +struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx) { struct perf_cpu result = { .cpu = -1 @@ -269,7 +267,7 @@ struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx) return result; } -int perf_cpu_map__nr(const struct perf_cpu_map *cpus) +unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus) { return cpus ? 
__perf_cpu_map__nr(cpus) : 1; } @@ -294,7 +292,7 @@ bool perf_cpu_map__is_empty(const struct perf_cpu_map *map) int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu) { - int low, high; + unsigned int low, high; if (!cpus) return -1; @@ -324,7 +322,7 @@ bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu) bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs) { - int nr; + unsigned int nr; if (lhs == rhs) return true; @@ -336,7 +334,7 @@ bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_m if (nr != __perf_cpu_map__nr(rhs)) return false; - for (int idx = 0; idx < nr; idx++) { + for (unsigned int idx = 0; idx < nr; idx++) { if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu) return false; } @@ -353,7 +351,7 @@ struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map) struct perf_cpu cpu, result = { .cpu = -1 }; - int idx; + unsigned int idx; perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) { result = cpu; @@ -384,7 +382,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a)) return false; - for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) { + for (unsigned int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) { if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu) return false; if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) { @@ -410,8 +408,7 @@ bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other) { struct perf_cpu *tmp_cpus; - int tmp_len; - int i, j, k; + unsigned int tmp_len, i, j, k; struct perf_cpu_map *merged; if (perf_cpu_map__is_subset(*orig, other)) @@ -455,7 +452,7 @@ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other) struct perf_cpu_map 
*perf_cpu_map__intersect(struct perf_cpu_map *orig, struct perf_cpu_map *other) { - int i, j, k; + unsigned int i, j, k; struct perf_cpu_map *merged; if (perf_cpu_map__is_subset(other, orig)) diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index 13a307fc75ae..f747c0bc692d 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -127,7 +127,8 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads) { struct perf_cpu cpu; - int idx, thread, err = 0; + unsigned int idx; + int thread, err = 0; if (cpus == NULL) { static struct perf_cpu_map *empty_cpu_map; @@ -460,7 +461,7 @@ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx) int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread) { struct perf_cpu cpu __maybe_unused; - int idx; + unsigned int idx; int err; perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) { @@ -499,12 +500,13 @@ int perf_evsel__disable(struct perf_evsel *evsel) int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) { - int err = 0, i; + int err = 0; - for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) + for (unsigned int i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) { err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_SET_FILTER, (void *)filter, i); + } return err; } diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h index e2be2d17c32b..c19678188b17 100644 --- a/tools/lib/perf/include/internal/cpumap.h +++ b/tools/lib/perf/include/internal/cpumap.h @@ -16,16 +16,16 @@ DECLARE_RC_STRUCT(perf_cpu_map) { refcount_t refcnt; /** Length of the map array. */ - int nr; + unsigned int nr; /** The CPU values. 
*/ struct perf_cpu map[]; }; -struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus); +struct perf_cpu_map *perf_cpu_map__alloc(unsigned int nr_cpus); int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu); bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b); -void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus); +void perf_cpu_map__set_nr(struct perf_cpu_map *map, unsigned int nr_cpus); static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map) { diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index 58cc5c5fa47c..a1dd25db65b6 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -49,7 +49,7 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index * is invalid. */ -LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); +LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, unsigned int idx); /** * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a * cpu of -1 for an invalid index, this makes an empty map @@ -57,7 +57,7 @@ LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, i * the result is the number CPUs in the map plus one if the * "any CPU"/dummy value is present. */ -LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); +LIBPERF_API unsigned int perf_cpu_map__nr(const struct perf_cpu_map *cpus); /** * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value. 
*/ diff --git a/tools/lib/python/kdoc/kdoc_parser.py b/tools/lib/python/kdoc/kdoc_parser.py index 74af7ae47aa4..c3f966da533e 100644 --- a/tools/lib/python/kdoc/kdoc_parser.py +++ b/tools/lib/python/kdoc/kdoc_parser.py @@ -439,6 +439,11 @@ class KernelDoc: # Ignore argument attributes arg = KernRe(r'\sPOS0?\s').sub(' ', arg) + # Replace '[at_least ' with '[static '. This allows sphinx to parse + # array parameter declarations like 'char A[at_least 4]', where + # 'at_least' is #defined to 'static' by the kernel headers. + arg = arg.replace('[at_least ', '[static ') + # Strip leading/trailing spaces arg = arg.strip() arg = KernRe(r'\s+').sub(' ', arg, count=1) |
