From bc0496043edf9c78e443bcf74ed2fa04566fdc18 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:20 +0200 Subject: perf evsel: Remove retry_sample_id goto label As far as I can tell, there is no good reason, apart from optimization, to have the retry_sample_id separate from fallback_missing_features. Probably, this label was added to avoid reapplying workarounds for missing features that had already been applied. However, missing features that have been added later have not used this optimization, always jumping to fallback_missing_features and reapplying all missing features. This patch removes that label, replacing it with fallback_missing_features. Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/340af0d03408d6621fd9c742e311db18b3585b3b.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f61e5dd53f5d..7b4bb3229a16 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1825,7 +1825,6 @@ fallback_missing_features: evsel->core.attr.bpf_event = 0; if (perf_missing_features.branch_hw_idx) evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; -retry_sample_id: if (perf_missing_features.sample_id_all) evsel->core.attr.sample_id_all = 0; @@ -2006,7 +2005,7 @@ try_fallback: } else if (!perf_missing_features.sample_id_all) { perf_missing_features.sample_id_all = true; pr_debug2_peo("switching off sample_id_all\n"); - goto retry_sample_id; + goto fallback_missing_features; } else if (!perf_missing_features.lbr_flags && (evsel->core.attr.branch_sample_type & (PERF_SAMPLE_BRANCH_NO_CYCLES | -- cgit v1.2.3 From d45ce03434fd0f9177a0d3a7237fce4263eed24b Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:21 +0200 Subject: perf evsel: Separate open preparation from open itself This is a preparatory patch for the following patches with the goal to separate in evlist__open_cpu the actual perf_event_open, which could be performed in parallel, from the existing fallback mechanisms, which should be handled sequentially. This patch separates the first lines of evsel__open_cpu into a new __evsel__prepare_open function.
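For illustration, the resulting shape of the code is a sequential preparation step followed by the open loop; the sketch below is a self-contained toy with stand-in type and function names (struct evsel here is a dummy, prepare_open() and open_cpu() are illustrative), not the actual perf implementation:

#include <stdio.h>

struct evsel { int dummy; };	/* stand-in for the real struct evsel */

/* sequential setup: dummy cpu/thread maps, fd-array allocation,
 * early -EINVAL checks for known-missing features */
static int prepare_open(struct evsel *evsel)
{
	(void)evsel;
	return 0;
}

/* keeps only the per-cpu/per-thread open loop, which could later run in parallel */
static int open_cpu(struct evsel *evsel)
{
	int err = prepare_open(evsel);

	if (err)
		return err;
	/* ... loop calling sys_perf_event_open() for each cpu/thread ... */
	return 0;
}

int main(void)
{
	struct evsel e = { 0 };

	return open_cpu(&e);
}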
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/e14118b934c338dbbf68b8677f20d0d7dbf9359a.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 7b4bb3229a16..ddf324e2e17a 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1746,22 +1746,20 @@ static int perf_event_open(struct evsel *evsel, return fd; } -static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, - struct perf_thread_map *threads, - int start_cpu, int end_cpu) + +static struct perf_cpu_map *empty_cpu_map; +static struct perf_thread_map *empty_thread_map; + +static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads) { - int cpu, thread, nthreads; - unsigned long flags = PERF_FLAG_FD_CLOEXEC; - int pid = -1, err, old_errno; - enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + int nthreads; if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || (perf_missing_features.aux_output && evsel->core.attr.aux_output)) return -EINVAL; if (cpus == NULL) { - static struct perf_cpu_map *empty_cpu_map; - if (empty_cpu_map == NULL) { empty_cpu_map = perf_cpu_map__dummy_new(); if (empty_cpu_map == NULL) @@ -1772,8 +1770,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, } if (threads == NULL) { - static struct perf_thread_map *empty_thread_map; - if (empty_thread_map == NULL) { empty_thread_map = thread_map__new_by_tid(-1); if (empty_thread_map == NULL) @@ -1792,6 +1788,33 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) return -ENOMEM; + return 0; +} + +static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads, + int start_cpu, int end_cpu) +{ + int cpu, thread, nthreads; + unsigned long flags = PERF_FLAG_FD_CLOEXEC; + int pid = -1, err, old_errno; + enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + + err = __evsel__prepare_open(evsel, cpus, threads); + if (err) + return err; + + if (cpus == NULL) + cpus = empty_cpu_map; + + if (threads == NULL) + threads = empty_thread_map; + + if (evsel->core.system_wide) + nthreads = 1; + else + nthreads = threads->nr; + if (evsel->cgrp) { flags |= PERF_FLAG_PID_CGROUP; pid = evsel->cgrp->fd; -- cgit v1.2.3 From 46def08f5db0d84275bd0a3ba4a279a2197aa3a6 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:22 +0200 Subject: perf evsel: Save open flags in evsel in prepare_open() This patch caches the flags used in perf_event_open() inside evsel, so that they can be set in __evsel__prepare_open() (this will be useful in patches in the workqueue series, when the fallback mechanisms will be handled outside the open itself). This also optimizes the code by not having to recompute them every time. Since flags are now saved in evsel, the flags argument in perf_event_open() is removed.
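Conceptually, the cached value is computed once per evsel from its cgroup setting; a minimal sketch of that computation, assuming only the PERF_FLAG_* constants from the UAPI header <linux/perf_event.h> (struct fake_evsel and prepare_open_flags() are illustrative stand-ins, not perf code):

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_evsel {
	bool has_cgroup;		/* stands in for evsel->cgrp != NULL */
	unsigned long open_flags;	/* the new cached field */
};

static void prepare_open_flags(struct fake_evsel *evsel)
{
	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
	if (evsel->has_cgroup)
		evsel->open_flags |= PERF_FLAG_PID_CGROUP;
	/* the cloexec fallback later simply clears the bit again:
	 * evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	 */
}

int main(void)
{
	struct fake_evsel evsel = { .has_cgroup = true };

	prepare_open_flags(&evsel);
	printf("open_flags = %#lx\n", evsel.open_flags);
	return 0;
}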
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/d9f63159098e56fa518eecf25171d72e6f74df37.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 24 ++++++++++++------------ tools/perf/util/evsel.h | 1 + 2 files changed, 13 insertions(+), 12 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index ddf324e2e17a..509a2970a94b 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1710,17 +1710,16 @@ static void display_attr(struct perf_event_attr *attr) } static int perf_event_open(struct evsel *evsel, - pid_t pid, int cpu, int group_fd, - unsigned long flags) + pid_t pid, int cpu, int group_fd) { int precise_ip = evsel->core.attr.precise_ip; int fd; while (1) { pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", - pid, cpu, group_fd, flags); + pid, cpu, group_fd, evsel->open_flags); - fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags); + fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, evsel->open_flags); if (fd >= 0) break; @@ -1788,6 +1787,10 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) return -ENOMEM; + evsel->open_flags = PERF_FLAG_FD_CLOEXEC; + if (evsel->cgrp) + evsel->open_flags |= PERF_FLAG_PID_CGROUP; + return 0; } @@ -1796,7 +1799,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int start_cpu, int end_cpu) { int cpu, thread, nthreads; - unsigned long flags = PERF_FLAG_FD_CLOEXEC; int pid = -1, err, old_errno; enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; @@ -1815,10 +1817,8 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, else nthreads = threads->nr; - if (evsel->cgrp) { - flags |= PERF_FLAG_PID_CGROUP; + if (evsel->cgrp) pid = evsel->cgrp->fd; - } fallback_missing_features: if (perf_missing_features.weight_struct) { @@ -1832,7 +1832,7 @@ fallback_missing_features: evsel->core.attr.clockid = 0; } if (perf_missing_features.cloexec) - flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; + evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; if (perf_missing_features.mmap2) evsel->core.attr.mmap2 = 0; if (perf_missing_features.exclude_guest) @@ -1866,7 +1866,7 @@ retry_open: test_attr__ready(); fd = perf_event_open(evsel, pid, cpus->map[cpu], - group_fd, flags); + group_fd); FD(evsel, cpu, thread) = fd; @@ -1874,7 +1874,7 @@ retry_open: if (unlikely(test_attr__enabled)) { test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], - fd, group_fd, flags); + fd, group_fd, evsel->open_flags); } if (fd < 0) { @@ -2012,7 +2012,7 @@ try_fallback: perf_missing_features.clockid = true; pr_debug2_peo("switching off use_clockid\n"); goto fallback_missing_features; - } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { + } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { perf_missing_features.cloexec = true; pr_debug2_peo("switching off cloexec flag\n"); goto fallback_missing_features; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 80383096d51c..76efcfa3e14d 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -150,6 +150,7 @@ struct evsel { struct bperf_leader_bpf *leader_skel; struct bperf_follower_bpf *follower_skel; }; + unsigned 
long open_flags; }; struct perf_missing_features { -- cgit v1.2.3 From 588f4ac7639941d12fa669c30e64e0d68fb96b08 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:23 +0200 Subject: perf evsel: Separate missing feature disabling from evsel__open_cpu This is a preparatory patch for the patches in the workqueue series with the goal to separate in evlist__open_cpu() the actual opening, which could be performed in parallel, from the existing fallback mechanisms, which should be handled sequentially. This patch separates the disabling of missing features from evlist__open_cpu() into a new function evsel__disable_missing_features(). Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/48138bd2932646dde315505da733c2ca635ad2ee.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 57 +++++++++++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 26 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 509a2970a94b..f0bc89f74391 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1794,33 +1794,8 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, return 0; } -static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, - struct perf_thread_map *threads, - int start_cpu, int end_cpu) +static void evsel__disable_missing_features(struct evsel *evsel) { - int cpu, thread, nthreads; - int pid = -1, err, old_errno; - enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; - - err = __evsel__prepare_open(evsel, cpus, threads); - if (err) - return err; - - if (cpus == NULL) - cpus = empty_cpu_map; - - if (threads == NULL) - threads = empty_thread_map; - - if (evsel->core.system_wide) - nthreads = 1; - else - nthreads = threads->nr; - - if (evsel->cgrp) - pid = evsel->cgrp->fd; - -fallback_missing_features: if (perf_missing_features.weight_struct) { evsel__set_sample_bit(evsel, WEIGHT); evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); @@ -1850,6 +1825,36 @@ fallback_missing_features: evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; if (perf_missing_features.sample_id_all) evsel->core.attr.sample_id_all = 0; +} + +static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads, + int start_cpu, int end_cpu) +{ + int cpu, thread, nthreads; + int pid = -1, err, old_errno; + enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + + err = __evsel__prepare_open(evsel, cpus, threads); + if (err) + return err; + + if (cpus == NULL) + cpus = empty_cpu_map; + + if (threads == NULL) + threads = empty_thread_map; + + if (evsel->core.system_wide) + nthreads = 1; + else + nthreads = threads->nr; + + if (evsel->cgrp) + pid = evsel->cgrp->fd; + +fallback_missing_features: + evsel__disable_missing_features(evsel); display_attr(&evsel->core.attr); -- cgit v1.2.3 From 6efd06e3741944e528bc8243c88d9e3320c1c80c Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:24 +0200 Subject: perf evsel: Add evsel__prepare_open() This function will prepare the evsel and disable the missing features. It will be used in one of the following patches.
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/fa5e78bbb92c848226f044278fdcf777b3ce4583.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 14 ++++++++++++++ tools/perf/util/evsel.h | 2 ++ 2 files changed, 16 insertions(+) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f0bc89f74391..45d778f063d4 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1827,6 +1827,20 @@ static void evsel__disable_missing_features(struct evsel *evsel) evsel->core.attr.sample_id_all = 0; } +int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads) +{ + int err; + + err = __evsel__prepare_open(evsel, cpus, threads); + if (err) + return err; + + evsel__disable_missing_features(evsel); + + return err; +} + static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads, int start_cpu, int end_cpu) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 76efcfa3e14d..b173700db8af 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -287,6 +287,8 @@ int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); void evsel__close(struct evsel *evsel); +int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, + struct perf_thread_map *threads); struct perf_sample; -- cgit v1.2.3 From d21fc5f077f7760bbae5742e185691718518f255 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:25 +0200 Subject: perf evsel: Separate missing feature detection from evsel__open_cpu() This is a preparatory patch for the workqueue patches with the goal to separate in evlist__open_cpu() the actual opening, which could be performed in parallel, from the existing fallback mechanisms, which should be handled sequentially. This patch separates the missing feature detection in evsel__open_cpu() into a new evsel__detect_missing_features() function. Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/cba0b7d939862473662adeedb0f9c9b69566ee9a.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 174 +++++++++++++++++++++++++----------------------- tools/perf/util/evsel.h | 1 + 2 files changed, 92 insertions(+), 83 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 45d778f063d4..943ddea9b1f4 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1841,6 +1841,96 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, return err; } +bool evsel__detect_missing_features(struct evsel *evsel) +{ + /* + * Must probe features in the order they were added to the + * perf_event_attr interface. 
+ */ + if (!perf_missing_features.weight_struct && + (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { + perf_missing_features.weight_struct = true; + pr_debug2("switching off weight struct support\n"); + return true; + } else if (!perf_missing_features.code_page_size && + (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { + perf_missing_features.code_page_size = true; + pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n"); + return false; + } else if (!perf_missing_features.data_page_size && + (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { + perf_missing_features.data_page_size = true; + pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n"); + return false; + } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { + perf_missing_features.cgroup = true; + pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n"); + return false; + } else if (!perf_missing_features.branch_hw_idx && + (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { + perf_missing_features.branch_hw_idx = true; + pr_debug2("switching off branch HW index support\n"); + return true; + } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { + perf_missing_features.aux_output = true; + pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n"); + return false; + } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { + perf_missing_features.bpf = true; + pr_debug2_peo("switching off bpf_event\n"); + return true; + } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { + perf_missing_features.ksymbol = true; + pr_debug2_peo("switching off ksymbol\n"); + return true; + } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { + perf_missing_features.write_backward = true; + pr_debug2_peo("switching off write_backward\n"); + return false; + } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { + perf_missing_features.clockid_wrong = true; + pr_debug2_peo("switching off clockid\n"); + return true; + } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { + perf_missing_features.clockid = true; + pr_debug2_peo("switching off use_clockid\n"); + return true; + } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { + perf_missing_features.cloexec = true; + pr_debug2_peo("switching off cloexec flag\n"); + return true; + } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { + perf_missing_features.mmap2 = true; + pr_debug2_peo("switching off mmap2\n"); + return true; + } else if (!perf_missing_features.exclude_guest && + (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) { + perf_missing_features.exclude_guest = true; + pr_debug2_peo("switching off exclude_guest, exclude_host\n"); + return true; + } else if (!perf_missing_features.sample_id_all) { + perf_missing_features.sample_id_all = true; + pr_debug2_peo("switching off sample_id_all\n"); + return true; + } else if (!perf_missing_features.lbr_flags && + (evsel->core.attr.branch_sample_type & + (PERF_SAMPLE_BRANCH_NO_CYCLES | + PERF_SAMPLE_BRANCH_NO_FLAGS))) { + perf_missing_features.lbr_flags = true; + pr_debug2_peo("switching off branch sample type no (cycles/flags)\n"); + return true; + } else if (!perf_missing_features.group_read && + evsel->core.attr.inherit && + (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && + 
evsel__is_group_leader(evsel)) { + perf_missing_features.group_read = true; + pr_debug2_peo("switching off group read\n"); + return true; + } else { + return false; + } +} + static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads, int start_cpu, int end_cpu) @@ -1979,90 +2069,8 @@ try_fallback: if (err != -EINVAL || cpu > 0 || thread > 0) goto out_close; - /* - * Must probe features in the order they were added to the - * perf_event_attr interface. - */ - if (!perf_missing_features.weight_struct && - (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { - perf_missing_features.weight_struct = true; - pr_debug2("switching off weight struct support\n"); + if (evsel__detect_missing_features(evsel)) goto fallback_missing_features; - } else if (!perf_missing_features.code_page_size && - (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { - perf_missing_features.code_page_size = true; - pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n"); - goto out_close; - } else if (!perf_missing_features.data_page_size && - (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { - perf_missing_features.data_page_size = true; - pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n"); - goto out_close; - } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { - perf_missing_features.cgroup = true; - pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n"); - goto out_close; - } else if (!perf_missing_features.branch_hw_idx && - (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { - perf_missing_features.branch_hw_idx = true; - pr_debug2("switching off branch HW index support\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { - perf_missing_features.aux_output = true; - pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n"); - goto out_close; - } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { - perf_missing_features.bpf = true; - pr_debug2_peo("switching off bpf_event\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { - perf_missing_features.ksymbol = true; - pr_debug2_peo("switching off ksymbol\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { - perf_missing_features.write_backward = true; - pr_debug2_peo("switching off write_backward\n"); - goto out_close; - } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { - perf_missing_features.clockid_wrong = true; - pr_debug2_peo("switching off clockid\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { - perf_missing_features.clockid = true; - pr_debug2_peo("switching off use_clockid\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { - perf_missing_features.cloexec = true; - pr_debug2_peo("switching off cloexec flag\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { - perf_missing_features.mmap2 = true; - pr_debug2_peo("switching off mmap2\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.exclude_guest && - (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) { - 
perf_missing_features.exclude_guest = true; - pr_debug2_peo("switching off exclude_guest, exclude_host\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.sample_id_all) { - perf_missing_features.sample_id_all = true; - pr_debug2_peo("switching off sample_id_all\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.lbr_flags && - (evsel->core.attr.branch_sample_type & - (PERF_SAMPLE_BRANCH_NO_CYCLES | - PERF_SAMPLE_BRANCH_NO_FLAGS))) { - perf_missing_features.lbr_flags = true; - pr_debug2_peo("switching off branch sample type no (cycles/flags)\n"); - goto fallback_missing_features; - } else if (!perf_missing_features.group_read && - evsel->core.attr.inherit && - (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && - evsel__is_group_leader(evsel)) { - perf_missing_features.group_read = true; - pr_debug2_peo("switching off group read\n"); - goto fallback_missing_features; - } out_close: if (err) threads->err_thread = thread; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b173700db8af..e683f4a36e1a 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -289,6 +289,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, void evsel__close(struct evsel *evsel); int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); +bool evsel__detect_missing_features(struct evsel *evsel); struct perf_sample; -- cgit v1.2.3 From 71efc48a4cbd208707b4ee4464b9eabd3b6438aa Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:26 +0200 Subject: perf evsel: Separate rlimit increase from evsel__open_cpu() This is a preparatory patch for the workqueue patches with the goal to separate, in evlist__open_cpu(), the actual opening (which could be performed in parallel) from the existing fallback mechanisms, which should be handled sequentially. This patch separates the rlimit increase from evsel__open_cpu().
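For reference, the file-descriptor-limit bump that the factored-out helper performs is the standard getrlimit()/setrlimit() dance on RLIMIT_NOFILE; a stand-alone sketch of the same two-step pattern follows (bump_nofile_limit() is a hypothetical name, and the real helper additionally saves and restores errno around the calls):

#include <stdio.h>
#include <sys/resource.h>

/* attempt 0: raise the soft limit to the hard limit;
 * attempt 1: push both limits up (normally needs CAP_SYS_RESOURCE) */
static int bump_nofile_limit(int attempt)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l))
		return -1;
	if (attempt == 0) {
		l.rlim_cur = l.rlim_max;
	} else {
		l.rlim_cur = l.rlim_max + 1000;
		l.rlim_max = l.rlim_cur;
	}
	return setrlimit(RLIMIT_NOFILE, &l);
}

int main(void)
{
	if (bump_nofile_limit(0) == 0)
		printf("soft RLIMIT_NOFILE raised to the hard limit\n");
	return 0;
}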
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/2f256de8ec37b9809a5cef73c2fa7bce416af5d3.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 50 +++++++++++++++++++++++++++++-------------------- tools/perf/util/evsel.h | 3 +++ 2 files changed, 33 insertions(+), 20 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 943ddea9b1f4..f370b88ce1a0 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1931,13 +1931,40 @@ bool evsel__detect_missing_features(struct evsel *evsel) } } +bool evsel__increase_rlimit(enum rlimit_action *set_rlimit) +{ + int old_errno; + struct rlimit l; + + if (*set_rlimit < INCREASED_MAX) { + old_errno = errno; + + if (getrlimit(RLIMIT_NOFILE, &l) == 0) { + if (*set_rlimit == NO_CHANGE) { + l.rlim_cur = l.rlim_max; + } else { + l.rlim_cur = l.rlim_max + 1000; + l.rlim_max = l.rlim_cur; + } + if (setrlimit(RLIMIT_NOFILE, &l) == 0) { + (*set_rlimit) += 1; + errno = old_errno; + return true; + } + } + errno = old_errno; + } + + return false; +} + static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads, int start_cpu, int end_cpu) { int cpu, thread, nthreads; int pid = -1, err, old_errno; - enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + enum rlimit_action set_rlimit = NO_CHANGE; err = __evsel__prepare_open(evsel, cpus, threads); if (err) @@ -2046,25 +2073,8 @@ try_fallback: * perf stat needs between 5 and 22 fds per CPU. When we run out * of them try to increase the limits. */ - if (err == -EMFILE && set_rlimit < INCREASED_MAX) { - struct rlimit l; - - old_errno = errno; - if (getrlimit(RLIMIT_NOFILE, &l) == 0) { - if (set_rlimit == NO_CHANGE) - l.rlim_cur = l.rlim_max; - else { - l.rlim_cur = l.rlim_max + 1000; - l.rlim_max = l.rlim_cur; - } - if (setrlimit(RLIMIT_NOFILE, &l) == 0) { - set_rlimit++; - errno = old_errno; - goto retry_open; - } - } - errno = old_errno; - } + if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit)) + goto retry_open; if (err != -EINVAL || cpu > 0 || thread > 0) goto out_close; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index e683f4a36e1a..47916db5d146 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -291,6 +291,9 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, struct perf_thread_map *threads); bool evsel__detect_missing_features(struct evsel *evsel); +enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX }; +bool evsel__increase_rlimit(enum rlimit_action *set_rlimit); + struct perf_sample; void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name); -- cgit v1.2.3 From da7c3b462293431ea2558765a977a55007a26379 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:27 +0200 Subject: perf evsel: Move ignore_missing_thread() to fallback code This patch moves ignore_missing_thread outside the perf_event_open loop. Doing so, we need to move the retry_open flag a few places higher, with minimal impact. Furthermore, thread need not be decreased since it won't get increased by the for loop (since we're jumping back inside), but we need to check that the nthreads decrease didn't put thread out of range. 
The goal is to have fallbacks handled in one place only, since in the future parallel code, these would be handled separately. Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/4eca51443c786baaf6811b7cd8e73aafd97f7606.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 29 +++++++++++++---------------- tools/perf/util/evsel.h | 5 +++++ 2 files changed, 18 insertions(+), 16 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f370b88ce1a0..f691f9ee30ea 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1656,7 +1656,7 @@ static int update_fds(struct evsel *evsel, return 0; } -static bool ignore_missing_thread(struct evsel *evsel, +bool evsel__ignore_missing_thread(struct evsel *evsel, int nr_cpus, int cpu, struct perf_thread_map *threads, int thread, int err) @@ -1993,12 +1993,15 @@ fallback_missing_features: for (thread = 0; thread < nthreads; thread++) { int fd, group_fd; +retry_open: + if (thread >= nthreads) + break; if (!evsel->cgrp && !evsel->core.system_wide) pid = perf_thread_map__pid(threads, thread); group_fd = get_group_fd(evsel, cpu, thread); -retry_open: + test_attr__ready(); fd = perf_event_open(evsel, pid, cpus->map[cpu], @@ -2016,20 +2019,6 @@ retry_open: if (fd < 0) { err = -errno; - if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { - /* - * We just removed 1 thread, so take a step - * back on thread index and lower the upper - * nthreads limit. - */ - nthreads--; - thread--; - - /* ... and pretend like nothing have happened. */ - err = 0; - continue; - } - pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", err); goto try_fallback; @@ -2069,6 +2058,14 @@ retry_open: return 0; try_fallback: + if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { + /* We just removed 1 thread, so lower the upper nthreads limit. */ + nthreads--; + + /* ... and pretend like nothing have happened. */ + err = 0; + goto retry_open; + } /* * perf stat needs between 5 and 22 fds per CPU. When we run out * of them try to increase the limits. diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 47916db5d146..b749bee2c7a6 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -294,6 +294,11 @@ bool evsel__detect_missing_features(struct evsel *evsel); enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX }; bool evsel__increase_rlimit(enum rlimit_action *set_rlimit); +bool evsel__ignore_missing_thread(struct evsel *evsel, + int nr_cpus, int cpu, + struct perf_thread_map *threads, + int thread, int err); + struct perf_sample; void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name); -- cgit v1.2.3 From ebfb045a417415d8ac3f588e443f001b1278c01c Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:28 +0200 Subject: perf evsel: Move test_attr__open() to success path in evsel__open_cpu() test_attr__open() ignores the fd if -1, therefore it is safe to move it to the success path (fd >= 0). 
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/b3baf11360ca96541c9631730614fd7d217496fc.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index f691f9ee30ea..8dc70dd045f3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2011,11 +2011,6 @@ retry_open: bpf_counter__install_pe(evsel, cpu, fd); - if (unlikely(test_attr__enabled)) { - test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], - fd, group_fd, evsel->open_flags); - } - if (fd < 0) { err = -errno; @@ -2024,6 +2019,11 @@ retry_open: goto try_fallback; } + if (unlikely(test_attr__enabled)) { + test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], + fd, group_fd, evsel->open_flags); + } + pr_debug2_peo(" = %d\n", fd); if (evsel->bpf_fd >= 0) { -- cgit v1.2.3 From 91233d003b0906f146765fbef3572509c99d9386 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:29 +0200 Subject: perf evsel: Move bpf_counter__install_pe() to success path in evsel__open_cpu() I don't see why bpf_counter__install_pe() should get called even if fd = -1, so I'm moving it to the success path. This will be useful in following patches to separate the actual open and the related operations from the fallback mechanisms. Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Link: http://lore.kernel.org/lkml/64f8a1b0a838a6e6049cd43c1beafd432999ae57.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 8dc70dd045f3..a576d985a7a7 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2009,8 +2009,6 @@ retry_open: FD(evsel, cpu, thread) = fd; - bpf_counter__install_pe(evsel, cpu, fd); - if (fd < 0) { err = -errno; @@ -2019,6 +2017,8 @@ retry_open: goto try_fallback; } + bpf_counter__install_pe(evsel, cpu, fd); + if (unlikely(test_attr__enabled)) { test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], fd, group_fd, evsel->open_flags); -- cgit v1.2.3 From 28667a526980bd713ccd55e955e9a9a9c9bc7724 Mon Sep 17 00:00:00 2001 From: Riccardo Mancini Date: Sat, 21 Aug 2021 11:19:30 +0200 Subject: perf evsel: Handle precise_ip fallback in evsel__open_cpu() This is another patch in the effort to separate the fallback mechanisms from the open itself. In case of precise_ip fallback, the original precise_ip will be stored in the evsel (it was stored in a local variable) and the open will be retried. Since the precise_ip fallback will be the first in the chain of fallbacks, there should be no functional change with this patch. 
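The retried-open behaviour is easiest to see in isolation: lower precise_ip one step at a time until the open succeeds or precision reaches zero, remembering the original value so it can be restored before handing over to the standard fallback. A self-contained sketch of that loop (struct fake_attr and try_open() are stand-ins for perf_event_attr and sys_perf_event_open(), not perf code):

#include <stdio.h>

struct fake_attr { int precise_ip; };

/* stand-in for sys_perf_event_open(): pretend the kernel only accepts precise_ip <= 1 */
static int try_open(const struct fake_attr *attr)
{
	return attr->precise_ip <= 1 ? 42 : -1;
}

int main(void)
{
	struct fake_attr attr = { .precise_ip = 3 };
	int original = attr.precise_ip;	/* what evsel->precise_ip_original records */
	int fd;

	while ((fd = try_open(&attr)) < 0 && attr.precise_ip > 0)
		attr.precise_ip--;	/* "decreasing precise_ip by one" */

	if (fd < 0)
		attr.precise_ip = original;	/* give up and leave it to the standard fallback */

	printf("fd=%d precise_ip=%d\n", fd, attr.precise_ip);
	return 0;
}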
Signed-off-by: Riccardo Mancini Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/74208c433d2024a6c4af9c0b140b54ed6b5ea810.1629490974.git.rickyman7@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 59 ++++++++++++++++++++++--------------------------- tools/perf/util/evsel.h | 2 ++ 2 files changed, 28 insertions(+), 33 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a576d985a7a7..54d251327b5b 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1709,42 +1709,29 @@ static void display_attr(struct perf_event_attr *attr) } } -static int perf_event_open(struct evsel *evsel, - pid_t pid, int cpu, int group_fd) +bool evsel__precise_ip_fallback(struct evsel *evsel) { - int precise_ip = evsel->core.attr.precise_ip; - int fd; - - while (1) { - pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", - pid, cpu, group_fd, evsel->open_flags); - - fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, evsel->open_flags); - if (fd >= 0) - break; - - /* Do not try less precise if not requested. */ - if (!evsel->precise_max) - break; - - /* - * We tried all the precise_ip values, and it's - * still failing, so leave it to standard fallback. - */ - if (!evsel->core.attr.precise_ip) { - evsel->core.attr.precise_ip = precise_ip; - break; - } + /* Do not try less precise if not requested. */ + if (!evsel->precise_max) + return false; - pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP); - evsel->core.attr.precise_ip--; - pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); - display_attr(&evsel->core.attr); + /* + * We tried all the precise_ip values, and it's + * still failing, so leave it to standard fallback. + */ + if (!evsel->core.attr.precise_ip) { + evsel->core.attr.precise_ip = evsel->precise_ip_original; + return false; } - return fd; -} + if (!evsel->precise_ip_original) + evsel->precise_ip_original = evsel->core.attr.precise_ip; + evsel->core.attr.precise_ip--; + pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); + display_attr(&evsel->core.attr); + return true; +} static struct perf_cpu_map *empty_cpu_map; static struct perf_thread_map *empty_thread_map; @@ -2004,8 +1991,11 @@ retry_open: test_attr__ready(); - fd = perf_event_open(evsel, pid, cpus->map[cpu], - group_fd); + pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx", + pid, cpus->map[cpu], group_fd, evsel->open_flags); + + fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu], + group_fd, evsel->open_flags); FD(evsel, cpu, thread) = fd; @@ -2058,6 +2048,9 @@ retry_open: return 0; try_fallback: + if (evsel__precise_ip_fallback(evsel)) + goto retry_open; + if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { /* We just removed 1 thread, so lower the upper nthreads limit. 
*/ nthreads--; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b749bee2c7a6..1b3eeab5f188 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -151,6 +151,7 @@ struct evsel { struct bperf_follower_bpf *follower_skel; }; unsigned long open_flags; + int precise_ip_original; }; struct perf_missing_features { @@ -298,6 +299,7 @@ bool evsel__ignore_missing_thread(struct evsel *evsel, int nr_cpus, int cpu, struct perf_thread_map *threads, int thread, int err); +bool evsel__precise_ip_fallback(struct evsel *evsel); struct perf_sample; -- cgit v1.2.3 From a7d212fc6c89d1619b9441f4c801cbff8ca34197 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 9 Sep 2021 15:55:07 +0300 Subject: perf tools: Factor out copy_config_terms() and free_config_terms() Factor out copy_config_terms() and free_config_terms() so that they can be reused. Signed-off-by: Adrian Hunter Acked-by: Jiri Olsa Cc: Jin Yao Cc: Kan Liang Link: https://lore.kernel.org/r/20210909125508.28693-2-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 20 +++++++++++++++----- tools/perf/util/evsel.h | 3 +++ tools/perf/util/parse-events.c | 9 +-------- 3 files changed, 19 insertions(+), 13 deletions(-) (limited to 'tools/perf/util/evsel.c') diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 54d251327b5b..dbfeceb2546c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -333,11 +333,11 @@ error_free: goto out; } -static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) +int copy_config_terms(struct list_head *dst, struct list_head *src) { struct evsel_config_term *pos, *tmp; - list_for_each_entry(pos, &src->config_terms, list) { + list_for_each_entry(pos, src, list) { tmp = malloc(sizeof(*tmp)); if (tmp == NULL) return -ENOMEM; @@ -350,11 +350,16 @@ static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) return -ENOMEM; } } - list_add_tail(&tmp->list, &dst->config_terms); + list_add_tail(&tmp->list, dst); } return 0; } +static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) +{ + return copy_config_terms(&dst->config_terms, &src->config_terms); +} + /** * evsel__clone - create a new evsel copied from @orig * @orig: original evsel @@ -1385,11 +1390,11 @@ int evsel__disable(struct evsel *evsel) return err; } -static void evsel__free_config_terms(struct evsel *evsel) +void free_config_terms(struct list_head *config_terms) { struct evsel_config_term *term, *h; - list_for_each_entry_safe(term, h, &evsel->config_terms, list) { + list_for_each_entry_safe(term, h, config_terms, list) { list_del_init(&term->list); if (term->free_str) zfree(&term->val.str); @@ -1397,6 +1402,11 @@ static void evsel__free_config_terms(struct evsel *evsel) } } +static void evsel__free_config_terms(struct evsel *evsel) +{ + free_config_terms(&evsel->config_terms); +} + void evsel__exit(struct evsel *evsel) { assert(list_empty(&evsel->core.node)); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 1b3eeab5f188..1f7edfa8568a 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -213,6 +213,9 @@ static inline struct evsel *evsel__new(struct perf_event_attr *attr) struct evsel *evsel__clone(struct evsel *orig); struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx); +int copy_config_terms(struct list_head *dst, struct list_head *src); +void free_config_terms(struct list_head *config_terms); + /* * Returns pointer with encoded error via interface.
*/ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index e5eae23cfceb..ded5808798f9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1608,14 +1608,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, } if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) { - struct evsel_config_term *pos, *tmp; - - list_for_each_entry_safe(pos, tmp, &config_terms, list) { - list_del_init(&pos->list); - if (pos->free_str) - zfree(&pos->val.str); - free(pos); - } + free_config_terms(&config_terms); return -EINVAL; } -- cgit v1.2.3
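With copy_config_terms() and free_config_terms() exported, any config-term list can be deep-copied and later released with the same pair. The ownership rule they follow (duplicate a string only when free_str is set, release it again on teardown) is illustrated below with a plain singly linked list, since the real code relies on the perf list_head API; all names in this sketch are illustrative and not part of the perf tree:

#include <stdlib.h>
#include <string.h>

struct term {
	struct term *next;
	char *str;
	int free_str;	/* set when str is heap-allocated and owned by this term */
};

static void free_terms(struct term *head)
{
	while (head) {
		struct term *next = head->next;

		if (head->free_str)
			free(head->str);
		free(head);
		head = next;
	}
}

static struct term *copy_terms(const struct term *src)
{
	struct term *head = NULL, **tail = &head;

	for (; src; src = src->next) {
		struct term *t = calloc(1, sizeof(*t));

		if (!t)
			goto err;
		*t = *src;
		t->next = NULL;
		if (src->free_str) {
			t->str = strdup(src->str);
			if (!t->str) {
				free(t);
				goto err;
			}
		}
		*tail = t;
		tail = &t->next;
	}
	return head;
err:
	free_terms(head);	/* release the partial copy on allocation failure */
	return NULL;
}

int main(void)
{
	struct term orig = { .str = strdup("config=1"), .free_str = 1 };
	struct term *copy = copy_terms(&orig);

	free_terms(copy);	/* the copy owns its own duplicated strings */
	free(orig.str);
	return 0;
}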