#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

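/*
 * On bare metal (HV mode) interrupt range 0 carries only the multiplexed PSL
 * interrupt, so per-AFU interrupt ranges start at index 1. In a guest, range 0
 * also carries AFU interrupts and must be included.
 */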
static int afu_irq_range_start(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 1;
	return 0;
}

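/*
 * Stash the fault information in the context and defer handling to the
 * fault_work worker, since resolving the fault may sleep.
 */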
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

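/*
 * PSL9 slice interrupt handler: decode the DSISR bits and either schedule
 * deferred fault handling, report a slice error, or record an AFU error.
 */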
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL9_DSISR_An_TF) {
		pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL9_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						       irq_info->errstat);
	if (dsisr & CXL_PSL9_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors. If we do
			 * ever see a storm of these come in it's probably best
			 * that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL9_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

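/*
 * PSL8 slice interrupt handler: same dispatch as the PSL9 variant, but the
 * DSISR also reports segment misses and individual page fault causes.
 */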
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a reference to the task's mm, which we can't do
		 * from irq context without the potential for a deadlock since
		 * it takes the task_lock. An alternate option would be to
		 * keep a reference to the task's mm the entire time it has
		 * cxl open, but to do that we need to solve the issue where
		 * we hold a ref to the mm, but the mm can hold a ref to the
		 * fd after an mmap preventing anything from being cleaned up.
		 */
0107 pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
0108 return schedule_cxl_fault(ctx, dsisr, dar);
0109 }
0110
0111 if (dsisr & CXL_PSL_DSISR_An_M)
0112 pr_devel("CXL interrupt: PTE not found\n");
0113 if (dsisr & CXL_PSL_DSISR_An_P)
0114 pr_devel("CXL interrupt: Storage protection violation\n");
0115 if (dsisr & CXL_PSL_DSISR_An_A)
0116 pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
0117 if (dsisr & CXL_PSL_DSISR_An_S)
0118 pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
0119 if (dsisr & CXL_PSL_DSISR_An_K)
0120 pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
0121
0122 if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock.
		 */
0128 pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
0129 return schedule_cxl_fault(ctx, dsisr, dar);
0130 }
0131 if (dsisr & CXL_PSL_DSISR_An_ST)
0132 WARN(1, "CXL interrupt: Segment Table PTE not found\n");
0133 if (dsisr & CXL_PSL_DSISR_An_UR)
0134 pr_devel("CXL interrupt: AURP PTE not found\n");
0135 if (dsisr & CXL_PSL_DSISR_An_PE)
0136 return cxl_ops->handle_psl_slice_error(ctx, dsisr,
0137 irq_info->errstat);
0138 if (dsisr & CXL_PSL_DSISR_An_AE) {
0139 pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
0140
0141 if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors. If we do
			 * ever see a storm of these come in it's probably best
			 * that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

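/*
 * Handler for AFU-sourced interrupts: translate the hardware IRQ back to the
 * per-context AFU interrupt number, record it in the context's irq_bitmap and
 * wake up anyone waiting on the context.
 */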
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 0;
	__u16 range;
	int r;

	/*
	 * Look for the interrupt number.
	 * On bare-metal, we know range 0 only contains the PSL
	 * interrupt so we could start counting at range 1 and initialize
	 * afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be counted for. Therefore we initialize afu_irq at 0 to take into
	 * account the PSL interrupt.
	 *
	 * For code-readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
		 afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

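/*
 * Map a hardware IRQ to a Linux virtual IRQ and request it with the given
 * handler. Returns the virq on success, or 0 on failure.
 */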
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	if (cxl_ops->setup_irq)
		cxl_ops->setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
}

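/*
 * Allocate a single hardware IRQ from the adapter and map/request it. On
 * success the hwirq and virq are returned through the dest_* pointers.
 */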
int cxl_register_one_irq(struct cxl *adapter,
			 irq_handler_t handler,
			 void *cookie,
			 irq_hw_number_t *dest_hwirq,
			 unsigned int *dest_virq,
			 const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_ops->release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

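/* Free all interrupt names allocated for this context. */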
void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

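/*
 * Allocate the IRQ ranges, the per-context interrupt bitmap and the
 * interrupt names for a context. Cleans up after itself on failure.
 */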
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;
	int alloc_count;

	/*
	 * In native mode, range 0 is reserved for the multiplexed PSL
	 * interrupt, which was allocated when the AFU was initialized.
	 *
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
	 * and is the first interrupt of range 0. It still needs to be
	 * allocated, so bump the count by one.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		alloc_count = count;
	else
		alloc_count = count + 1;

	if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
					    alloc_count)))
		return rc;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Multiplexed PSL Interrupt */
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->irq_count = count;
	ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first. If any fail, bail out before requesting the
	 * actual hardware IRQs.
	 */
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}

			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	bitmap_free(ctx->irq_bitmap);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

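/*
 * Map and request every hardware IRQ allocated for the context, using the
 * names prepared by afu_allocate_irqs().
 */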
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;
	irqreturn_t (*handler)(int irq, void *data);

	/* All memory is allocated at this point, so do the IRQ mappings */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			if (r == 0 && i == 0)
				/*
				 * The very first interrupt of range 0 is
				 * always the PSL interrupt, but we only need
				 * to connect a handler for guests, because
				 * there is one PSL interrupt per context.
				 * On bare-metal, the PSL interrupt is
				 * multiplexed and was set up when the AFU
				 * was configured.
				 */
				handler = cxl_ops->psl_interrupt;
			else
				handler = cxl_irq_afu;
			cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
				    irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

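/* Allocate and then map/request all AFU interrupts for a context. */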
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

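/*
 * Tear down all of a context's AFU interrupts: unmap the virqs, free the
 * interrupt names and release the IRQ ranges back to the adapter.
 */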
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}

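/* Decode and log the PSL_SERR_An slice error register bits. */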
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
	dev_crit(&afu->dev,
		 "PSL Slice error received. Check AFU for root cause.\n");
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	if (serr & CXL_PSL_SERR_An_afuto)
		dev_crit(&afu->dev, "AFU MMIO Timeout\n");
	if (serr & CXL_PSL_SERR_An_afudis)
		dev_crit(&afu->dev,
			 "MMIO targeted Accelerator that was not enabled\n");
	if (serr & CXL_PSL_SERR_An_afuov)
		dev_crit(&afu->dev, "AFU CTAG Overflow\n");
	if (serr & CXL_PSL_SERR_An_badsrc)
		dev_crit(&afu->dev, "Bad Interrupt Source\n");
	if (serr & CXL_PSL_SERR_An_badctx)
		dev_crit(&afu->dev, "Bad Context Handle\n");
	if (serr & CXL_PSL_SERR_An_llcmdis)
		dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
	if (serr & CXL_PSL_SERR_An_llcmdto)
		dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
	if (serr & CXL_PSL_SERR_An_afupar)
		dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
	if (serr & CXL_PSL_SERR_An_afudup)
		dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
	if (serr & CXL_PSL_SERR_An_AE)
		dev_crit(&afu->dev,
			 "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}