#include <linux/list.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
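
/* Size each allocation chunk at 64 KiB worth of ordered_event entries. */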
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
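
/*
 * Get a free event slot, trying in order: the cache of previously
 * deleted events, the next unused slot in the current buffer, and
 * finally a freshly malloc'ed buffer (as long as cur_alloc_size stays
 * below max_alloc_size). Returns NULL when no slot is available.
 */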
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer)
			return NULL;

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	}

	return new;
}
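
/* Allocate an event for @timestamp and queue it in sorted order. */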
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp)
{
	struct ordered_event *new;

	new = alloc_event(oe);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}
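
/* Remove @event from the queue and recycle its slot via the cache list. */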
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
}
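
/*
 * Deliver every queued event with a timestamp up to oe->next_flush,
 * recycling each one after delivery. A progress bar is shown only when
 * the whole queue is being drained (limit == ULLONG_MAX).
 */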
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample,
							  tool, iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
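
/*
 * Flush according to @how: OE_FLUSH__FINAL drains everything,
 * OE_FLUSH__HALF flushes up to the midpoint between the oldest queued
 * timestamp and the timestamp of the most recently queued event, and
 * OE_FLUSH__ROUND flushes up to the previously recorded round boundary,
 * then records the current max_timestamp as the next one.
 */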
int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
			  enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	default:
		break;
	}

	err = __ordered_events__flush(s, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;
	}

	return err;
}
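
/*
 * Set up an empty queue with an effectively unlimited allocation budget.
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	ordered_events__init(&session->ordered_events);
 *	...
 *	event = ordered_events__new(&session->ordered_events, timestamp);
 *	...
 *	ordered_events__flush(session, tool, OE_FLUSH__FINAL);
 *	ordered_events__free(&session->ordered_events);
 */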
void ordered_events__init(struct ordered_events *oe)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
}
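
/* Free every buffer chunk linked on the to_free list. */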
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free(event);
	}
}