// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/firmware.h>
#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

struct dtl {
	struct dtl_entry	*buf;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
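
/*
 * Both variables above are exposed through debugfs (see dtl_init()):
 * dtl_event_mask is writable (0600) and selects which DTL_LOG_* event
 * classes the hypervisor records, with a new mask taking effect the next
 * time a per-cpu log is enabled; dtl_buf_entries is read-only (0400).
 */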

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();

	++dtlr->write_index;
}
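
/*
 * Ordering note: the entry body is copied (and the compiler barrier taken)
 * before write_ptr advances, and smp_wmb() orders the copy before the
 * write_index increment, so a reader that samples write_index first never
 * treats a partially written slot as complete.
 */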

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
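
/*
 * Summary of the two backends above: with CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 * the stolen-time accounting code already owns the hypervisor-facing buffer
 * and hands us entries through dtl_consumer, so we mirror them into a local
 * ring; without it, our buffer is registered directly with the hypervisor
 * and the current index is read straight from the lppaca.
 */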

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* ensure there are no other conflicting dtl users */
	if (!read_trylock(&dtl_access_lock))
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
		       __func__, dtl->cpu);
		read_unlock(&dtl_access_lock);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc) {
		read_unlock(&dtl_access_lock);
		kmem_cache_free(dtl_cache, buf);
	}

	return rc;
}
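
/*
 * Note the allocate-then-recheck pattern in dtl_enable(): the buffer is
 * allocated outside dtl->lock (GFP_KERNEL may sleep) and dtl->buf is
 * re-tested under the lock, so two racing openers cannot both install a
 * buffer; the loser drops the access lock and frees its allocation.
 */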

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
	read_unlock(&dtl_access_lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}
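
/*
 * Logging is active only while the file is held open: a second opener sees
 * -EBUSY from dtl_enable(), and release() tears the log down again.
 */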

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
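
/*
 * Userspace consumes the log by read()ing the per-cpu debugfs file in
 * multiples of sizeof(struct dtl_entry).  A minimal sketch, assuming
 * debugfs is mounted at the conventional /sys/kernel/debug:
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry entries[16];
 *	ssize_t n = read(fd, entries, sizeof(entries));
 *	// n is a multiple of sizeof(struct dtl_entry); 0 means no new
 *	// entries have been logged since the previous read.
 *	close(fd);
 */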

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static void dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}

static int dtl_init(void)
{
	int i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);

	debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
	debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		dtl_setup_file(dtl);
	}

	return 0;
}
machine_arch_initcall(pseries, dtl_init);