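/*
 * Cell Broadband Engine Performance Monitor
 *
 * Register access routines for the Cell PMU.
 */
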
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <asm/machdep.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#include <asm/spu.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

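/*
 * When writing to write-only mmio addresses, save a shadow copy. All of the
 * registers are 32-bit, but stored in the upper half of a 64-bit field.
 * Note: these helpers expect a variable named "cpu" to be in scope at the
 * point of use.
 */
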
#define WRITE_WO_MMIO(reg, x)					\
	do {							\
		u32 _x = (x);					\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		out_be64(&(pmd_regs->reg), (((u64)_x) << 32));	\
		shadow_regs->reg = _x;				\
	} while (0)

#define READ_SHADOW_REG(val, reg)				\
	do {							\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		(val) = shadow_regs->reg;			\
	} while (0)

#define READ_MMIO_UPPER32(val, reg)				\
	do {							\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		(val) = (u32)(in_be64(&pmd_regs->reg) >> 32);	\
	} while (0)

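/*
 * Physical counter registers.
 * Each physical counter can act as one 32-bit counter or two 16-bit counters.
 */
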
u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
	u32 val_in_latch, val = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		READ_SHADOW_REG(val_in_latch, counter_value_in_latch);

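		/* Read the latch or the actual counter, whichever is newer. */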
		if (val_in_latch & (1 << phys_ctr)) {
			READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
		} else {
			READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
		}
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);

void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
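		/*
		 * Writing to a counter only writes to a hardware latch.
		 * The new value is not propagated to the actual counter
		 * until the performance monitor is enabled.
		 */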
		WRITE_WO_MMIO(pm_ctr[phys_ctr], val);

		pm_ctrl = cbe_read_pm(cpu, pm_control);
		if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
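			/*
			 * The counters are already active, so we need to
			 * rewrite the pm_control register to "re-enable"
			 * the PMU.
			 */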
			cbe_write_pm(cpu, pm_control, pm_ctrl);
		} else {
			shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
			shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
		}
	}
}
EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);

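/*
 * "Logical" counter registers.
 * These will read/write 16 bits or 32 bits depending on the
 * current size of the counter. Counters 4 - 7 are always 16-bit.
 */
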
u32 cbe_read_ctr(u32 cpu, u32 ctr)
{
	u32 val;
	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	val = cbe_read_phys_ctr(cpu, phys_ctr);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
		val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_ctr);

void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
{
	u32 phys_ctr;
	u32 phys_val;

	phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
		phys_val = cbe_read_phys_ctr(cpu, phys_ctr);

		if (ctr < NR_PHYS_CTRS)
			val = (val << 16) | (phys_val & 0xffff);
		else
			val = (val & 0xffff) | (phys_val & 0xffff0000);
	}

	cbe_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(cbe_write_ctr);

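/*
 * Counter-control registers.
 * Each "logical" counter has a corresponding control register.
 */
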
u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
{
	u32 pm07_control = 0;

	if (ctr < NR_CTRS)
		READ_SHADOW_REG(pm07_control, pm07_control[ctr]);

	return pm07_control;
}
EXPORT_SYMBOL_GPL(cbe_read_pm07_control);

void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
	if (ctr < NR_CTRS)
		WRITE_WO_MMIO(pm07_control[ctr], val);
}
EXPORT_SYMBOL_GPL(cbe_write_pm07_control);

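/*
 * Other PMU control registers. Most of these are write-only.
 */
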
u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
{
	u32 val = 0;

	switch (reg) {
	case group_control:
		READ_SHADOW_REG(val, group_control);
		break;

	case debug_bus_control:
		READ_SHADOW_REG(val, debug_bus_control);
		break;

	case trace_address:
		READ_MMIO_UPPER32(val, trace_address);
		break;

	case ext_tr_timer:
		READ_SHADOW_REG(val, ext_tr_timer);
		break;

	case pm_status:
		READ_MMIO_UPPER32(val, pm_status);
		break;

	case pm_control:
		READ_SHADOW_REG(val, pm_control);
		break;

	case pm_interval:
		READ_MMIO_UPPER32(val, pm_interval);
		break;

	case pm_start_stop:
		READ_SHADOW_REG(val, pm_start_stop);
		break;
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_pm);

void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
	switch (reg) {
	case group_control:
		WRITE_WO_MMIO(group_control, val);
		break;

	case debug_bus_control:
		WRITE_WO_MMIO(debug_bus_control, val);
		break;

	case trace_address:
		WRITE_WO_MMIO(trace_address, val);
		break;

	case ext_tr_timer:
		WRITE_WO_MMIO(ext_tr_timer, val);
		break;

	case pm_status:
		WRITE_WO_MMIO(pm_status, val);
		break;

	case pm_control:
		WRITE_WO_MMIO(pm_control, val);
		break;

	case pm_interval:
		WRITE_WO_MMIO(pm_interval, val);
		break;

	case pm_start_stop:
		WRITE_WO_MMIO(pm_start_stop, val);
		break;
	}
}
EXPORT_SYMBOL_GPL(cbe_write_pm);

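/*
 * Get/set the size of a physical counter to either 16 or 32 bits.
 */
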
u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
{
	u32 pm_ctrl, size = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
	}

	return size;
}
EXPORT_SYMBOL_GPL(cbe_get_ctr_size);

void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		switch (ctr_size) {
		case 16:
			pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
			break;

		case 32:
			pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
			break;
		}
		cbe_write_pm(cpu, pm_control, pm_ctrl);
	}
}
EXPORT_SYMBOL_GPL(cbe_set_ctr_size);

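/*
 * Enable/disable the entire performance monitoring unit.
 * When we enable the PMU, all pending writes to counters get committed.
 */
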
void cbe_enable_pm(u32 cpu)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
	shadow_regs->counter_value_in_latch = 0;

	pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_enable_pm);

void cbe_disable_pm(u32 cpu)
{
	u32 pm_ctrl;

	pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm);

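/*
 * Reading from the trace_buffer.
 * The trace buffer is two 64-bit registers. Reading from
 * the second half automatically increments the trace_address.
 */
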
void cbe_read_trace_buffer(u32 cpu, u64 *buf)
{
	struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);

	*buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
	*buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
}
EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);

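/*
 * Enabling/disabling interrupts for the entire performance monitoring unit.
 */
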
u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
{
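	/* Reading pm_status clears the interrupt bits. */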
	return cbe_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);

void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
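	/* Set which node and thread will handle the next interrupt. */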
	iic_set_interrupt_routing(cpu, thread, 0);

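	/* Enable the interrupt bits in the pm_status register. */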
	if (mask)
		cbe_write_pm(cpu, pm_status, mask);
}
EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);

void cbe_disable_pm_interrupts(u32 cpu)
{
	cbe_get_and_clear_pm_interrupts(cpu);
	cbe_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);

static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
{
	perf_irq(get_irq_regs());
	return IRQ_HANDLED;
}

static int __init cbe_init_pm_irq(void)
{
	unsigned int irq;
	int rc, node;

	for_each_online_node(node) {
		irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
					       (node << IIC_IRQ_NODE_SHIFT));
		if (!irq) {
			printk(KERN_ERR "ERROR: Unable to allocate irq for node %d\n",
			       node);
			return -EINVAL;
		}

		rc = request_irq(irq, cbe_pm_irq,
				 0, "cbe-pmu-0", NULL);
		if (rc) {
			printk(KERN_ERR "ERROR: Request for irq on node %d failed\n",
			       node);
			return rc;
		}
	}

	return 0;
}
machine_arch_initcall(cell, cbe_init_pm_irq);

void cbe_sync_irq(int node)
{
	unsigned int irq;

	irq = irq_find_mapping(NULL,
			       IIC_IRQ_IOEX_PMI
			       | (node << IIC_IRQ_NODE_SHIFT));

	if (!irq) {
		printk(KERN_WARNING "ERROR, unable to get existing irq %d "
		       "for node %d\n", irq, node);
		return;
	}

	synchronize_irq(irq);
}
EXPORT_SYMBOL_GPL(cbe_sync_irq);