// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
    eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
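
/*
 * Both macros funnel into perf's eprintf(), which emits the message
 * only when the debug_ordered_events verbosity level is at least 'n';
 * pr() therefore prints at debug level 1 and above.
 */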

static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
    struct ordered_event *last = oe->last;
    u64 timestamp = new->timestamp;
    struct list_head *p;

    ++oe->nr_events;
    oe->last = new;

    pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

    if (!last) {
        list_add(&new->list, &oe->events);
        oe->max_timestamp = timestamp;
        return;
    }

    /*
     * The last event might point to some random place in the list as
     * it's the last queued event. We expect the new event to be close
     * to it, so we search from there.
     */
    if (last->timestamp <= timestamp) {
        while (last->timestamp <= timestamp) {
            p = last->list.next;
            if (p == &oe->events) {
                list_add_tail(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
            }
            last = list_entry(p, struct ordered_event, list);
        }
        list_add_tail(&new->list, &last->list);
    } else {
        while (last->timestamp > timestamp) {
            p = last->list.prev;
            if (p == &oe->events) {
                list_add(&new->list, &oe->events);
                return;
            }
            last = list_entry(p, struct ordered_event, list);
        }
        list_add(&new->list, &last->list);
    }
}
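
/*
 * Illustrative example (not part of the original source): with the
 * queue holding timestamps 10 -> 20 -> 30 and oe->last at the entry
 * with timestamp 20, queueing timestamp 25 walks forward once, stops
 * at 30 (the first entry with a larger timestamp) and inserts the new
 * event in front of it via list_add_tail(), giving 10 -> 20 -> 25 -> 30.
 */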

static union perf_event *__dup_event(struct ordered_events *oe,
                     union perf_event *event)
{
    union perf_event *new_event = NULL;

    if (oe->cur_alloc_size < oe->max_alloc_size) {
        new_event = memdup(event, event->header.size);
        if (new_event)
            oe->cur_alloc_size += event->header.size;
    }

    return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                   union perf_event *event)
{
    return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
    if (event) {
        oe->cur_alloc_size -= event->header.size;
        free(event);
    }
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
    if (oe->copy_on_queue)
        __free_dup_event(oe, event);
}
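
/*
 * With copy_on_queue set, each queued event is duplicated into memory
 * accounted in cur_alloc_size and released again by free_dup_event();
 * without it, the queue just borrows the caller's buffer and frees
 * nothing.
 */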

#define MAX_SAMPLE_BUFFER   (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                     union perf_event *event)
{
    struct list_head *cache = &oe->cache;
    struct ordered_event *new = NULL;
    union perf_event *new_event;
    size_t size;

    new_event = dup_event(oe, event);
    if (!new_event)
        return NULL;

    /*
     * We maintain the following scheme of buffers for ordered
     * event allocation:
     *
     *   to_free list -> buffer1 (64K)
     *                   buffer2 (64K)
     *                   ...
     *
     * Each buffer keeps an array of ordered event objects:
     *    buffer -> event[0]
     *              event[1]
     *              ...
     *
     * Each allocated ordered event is linked to one of the
     * following lists:
     *   - time ordered list 'events'
     *   - list of currently removed events 'cache'
     *
     * Allocation of an ordered event uses the following order
     * to get the memory:
     *   - use a recently removed object from the 'cache' list
     *   - use an available object in the current allocation buffer
     *   - allocate a new buffer if the current buffer is full
     *
     * Removal of an ordered event object moves it from 'events' to
     * the 'cache' list.
     */
    size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

    if (!list_empty(cache)) {
        new = list_entry(cache->next, struct ordered_event, list);
        list_del_init(&new->list);
    } else if (oe->buffer) {
        new = &oe->buffer->event[oe->buffer_idx];
        if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
            oe->buffer = NULL;
    } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
        oe->buffer = malloc(size);
        if (!oe->buffer) {
            free_dup_event(oe, new_event);
            return NULL;
        }

        pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
           oe->cur_alloc_size, size, oe->max_alloc_size);

        oe->cur_alloc_size += size;
        list_add(&oe->buffer->list, &oe->to_free);

        oe->buffer_idx = 1;
        new = &oe->buffer->event[0];
    } else {
        pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
        return NULL;
    }

    new->event = new_event;
    return new;
}
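
/*
 * Sizing example (illustrative; assumes a 64-bit build where
 * sizeof(struct ordered_event) is 48 bytes): MAX_SAMPLE_BUFFER works
 * out to 64 * 1024 / 48 = 1365 entries, so each buffer on the to_free
 * list carries roughly 64K of ordered_event slots plus the small
 * buffer header.
 */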

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
            union perf_event *event)
{
    struct ordered_event *new;

    new = alloc_event(oe, event);
    if (new) {
        new->timestamp = timestamp;
        queue_event(oe, new);
    }

    return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
    list_move(&event->list, &oe->cache);
    oe->nr_events--;
    free_dup_event(oe, event->event);
    event->event = NULL;
}

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
              u64 timestamp, u64 file_offset, const char *file_path)
{
    struct ordered_event *oevent;

    if (!timestamp || timestamp == ~0ULL)
        return -ETIME;

    if (timestamp < oe->last_flush) {
        pr_oe_time(timestamp,      "out of order event\n");
        pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
               oe->last_flush_type);

        oe->nr_unordered_events++;
    }

    oevent = ordered_events__new_event(oe, timestamp, event);
    if (!oevent) {
        ordered_events__flush(oe, OE_FLUSH__HALF);
        oevent = ordered_events__new_event(oe, timestamp, event);
    }

    if (!oevent)
        return -ENOMEM;

    oevent->file_offset = file_offset;
    oevent->file_path = file_path;
    return 0;
}
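
/*
 * Caller-side sketch (illustrative; sample, offset and path stand in
 * for whatever the caller has at hand): -ETIME flags events with no
 * usable timestamp, and -ENOMEM is returned only after the
 * OE_FLUSH__HALF fallback above failed to free enough space:
 *
 *	err = ordered_events__queue(oe, event, sample->time,
 *				    offset, path);
 *	if (err == -ETIME)
 *		;	// no timestamp; process the event directly instead
 *	else if (err)
 *		return err;
 */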

static int do_flush(struct ordered_events *oe, bool show_progress)
{
    struct list_head *head = &oe->events;
    struct ordered_event *tmp, *iter;
    u64 limit = oe->next_flush;
    u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
    struct ui_progress prog;
    int ret;

    if (!limit)
        return 0;

    if (show_progress)
        ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

    list_for_each_entry_safe(iter, tmp, head, list) {
        if (session_done())
            return 0;

        if (iter->timestamp > limit)
            break;
        ret = oe->deliver(oe, iter);
        if (ret)
            return ret;

        ordered_events__delete(oe, iter);
        oe->last_flush = iter->timestamp;

        if (show_progress)
            ui_progress__update(&prog, 1);
    }

    if (list_empty(head))
        oe->last = NULL;
    else if (last_ts <= limit)
        oe->last = list_entry(head->prev, struct ordered_event, list);

    if (show_progress)
        ui_progress__finish();

    return 0;
}
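
/*
 * Illustrative walk-through (not in the original source): with queued
 * timestamps 5, 15 and 25 and next_flush == 20, do_flush() delivers 5
 * and 15, stops at 25, and leaves oe->last untouched because the saved
 * last timestamp (25) is above the limit and that entry is still queued.
 */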

static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
                   u64 timestamp)
{
    static const char * const str[] = {
        "NONE",
        "FINAL",
        "ROUND",
        "HALF ",
        "TOP  ",
        "TIME ",
    };
    int err;
    bool show_progress = false;

    if (oe->nr_events == 0)
        return 0;

    switch (how) {
    case OE_FLUSH__FINAL:
        show_progress = true;
        __fallthrough;
    case OE_FLUSH__TOP:
        oe->next_flush = ULLONG_MAX;
        break;

    case OE_FLUSH__HALF:
    {
        struct ordered_event *first, *last;
        struct list_head *head = &oe->events;

        first = list_entry(head->next, struct ordered_event, list);
        last = oe->last;

        /* Warn if we are called before any event got allocated. */
        if (WARN_ONCE(!last || list_empty(head), "empty queue"))
            return 0;

        oe->next_flush  = first->timestamp;
        oe->next_flush += (last->timestamp - first->timestamp) / 2;
        break;
    }

    case OE_FLUSH__TIME:
        oe->next_flush = timestamp;
        show_progress = false;
        break;

    case OE_FLUSH__ROUND:
    case OE_FLUSH__NONE:
    default:
        break;
    }

    pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
           str[how], oe->nr_events);
    pr_oe_time(oe->max_timestamp, "max_timestamp\n");

    err = do_flush(oe, show_progress);

    if (!err) {
        if (how == OE_FLUSH__ROUND)
            oe->next_flush = oe->max_timestamp;

        oe->last_flush_type = how;
    }

    pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
           str[how], oe->nr_events);
    pr_oe_time(oe->last_flush, "last_flush\n");

    return err;
}
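
/*
 * Worked example for OE_FLUSH__HALF (illustrative): with the first
 * queued timestamp at 100 and the last queued event at 300, next_flush
 * becomes 100 + (300 - 100) / 2 = 200, so roughly the older half of
 * the queue is delivered to make room.
 */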

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
    return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
    return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

u64 ordered_events__first_time(struct ordered_events *oe)
{
    struct ordered_event *event;

    if (list_empty(&oe->events))
        return 0;

    event = list_first_entry(&oe->events, struct ordered_event, list);
    return event->timestamp;
}

void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
              void *data)
{
    INIT_LIST_HEAD(&oe->events);
    INIT_LIST_HEAD(&oe->cache);
    INIT_LIST_HEAD(&oe->to_free);
    oe->max_alloc_size = (u64) -1;
    oe->cur_alloc_size = 0;
    oe->deliver    = deliver;
    oe->data       = data;
}
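
/*
 * Lifecycle sketch (illustrative; deliver_cb and ctx are hypothetical
 * caller-side names, not defined in this file):
 *
 *	static int deliver_cb(struct ordered_events *oe,
 *			      struct ordered_event *event)
 *	{
 *		// process event->event, using oe->data for context
 *		return 0;
 *	}
 *
 *	ordered_events__init(&oe, deliver_cb, ctx);
 *	oe.copy_on_queue = true;	// optional: keep own copies of events
 *	ordered_events__queue(&oe, event, timestamp, offset, path);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */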

static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
                unsigned int max, struct ordered_events *oe)
{
    if (oe->copy_on_queue) {
        unsigned int i;

        for (i = 0; i < max; i++)
            __free_dup_event(oe, buffer->event[i].event);
    }

    free(buffer);
}

void ordered_events__free(struct ordered_events *oe)
{
    struct ordered_events_buffer *buffer, *tmp;

    if (list_empty(&oe->to_free))
        return;

    /*
     * The current buffer might not have all of its events allocated
     * yet; free only the allocated ones ...
     */
    if (oe->buffer) {
        list_del_init(&oe->buffer->list);
        ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
    }

    /* ... and continue with the rest */
    list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
        list_del_init(&buffer->list);
        ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
    }
}

void ordered_events__reinit(struct ordered_events *oe)
{
    /*
     * Save the deliver callback and caller context before zeroing;
     * reading oe->data after the memset() would hand a NULL context
     * back to ordered_events__init().
     */
    ordered_events__deliver_t old_deliver = oe->deliver;
    void *old_data = oe->data;

    ordered_events__free(oe);
    memset(oe, '\0', sizeof(*oe));
    ordered_events__init(oe, old_deliver, old_data);
}