// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local:  CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
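/*
 * Example (editor's sketch, not part of this file): the clocks above are
 * normally selected from user space through tracefs rather than called
 * directly. A minimal helper, assuming tracefs is mounted at
 * /sys/kernel/tracing (older systems use /sys/kernel/debug/tracing);
 * select_trace_clock() is a hypothetical name for illustration only.
 */
#include <stdio.h>

static int select_trace_clock(const char *name)
{
	/* Valid names for this file's clocks include "local", "global"
	 * and "counter". */
	FILE *f = fopen("/sys/kernel/tracing/trace_clock", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", name);
	return fclose(f);
}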
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUs call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
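/*
 * Example (editor's sketch, not part of this file): the clamp-under-trylock
 * technique of trace_clock_global(), transplanted to user space with C11
 * atomics so it can be compiled and experimented with. raw_ns() and
 * global_clock() are hypothetical names; this illustrates the pattern, it
 * is not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static _Atomic uint64_t prev_time;
static atomic_flag prev_lock = ATOMIC_FLAG_INIT;

static uint64_t raw_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

uint64_t global_clock(void)
{
	uint64_t now = raw_ns();
	uint64_t prev = atomic_load_explicit(&prev_time, memory_order_acquire);

	/* Never return a value behind the last published timestamp. */
	if ((int64_t)(now - prev) < 0)
		now = prev;

	/* Try-lock, mirroring arch_spin_trylock(): on contention, skip
	 * the update instead of spinning. */
	if (!atomic_flag_test_and_set_explicit(&prev_lock, memory_order_acquire)) {
		/* Reread in case another thread advanced it meanwhile. */
		prev = atomic_load_explicit(&prev_time, memory_order_relaxed);
		if ((int64_t)(now - prev) < 0)
			now = prev;
		atomic_store_explicit(&prev_time, now, memory_order_relaxed);
		atomic_flag_clear_explicit(&prev_lock, memory_order_release);
	}

	return now;
}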