// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT    1
#define CXL_SLOT_RESET_EVENT        2
#define CXL_RESUME_EVENT        3

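/*
 * Walk the devices on the AFU's virtual PHB and invoke the PCI error
 * callback (error_detected/slot_reset/resume) that each AFU driver has
 * registered for the bus error event being reported.
 */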
static void pci_error_handlers(struct cxl_afu *afu,
                int bus_error_event,
                pci_channel_state_t state)
{
    struct pci_dev *afu_dev;
    struct pci_driver *afu_drv;
    const struct pci_error_handlers *err_handler;

    if (afu->phb == NULL)
        return;

    list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
        afu_drv = to_pci_driver(afu_dev->dev.driver);
        if (!afu_drv)
            continue;

        err_handler = afu_drv->err_handler;
        switch (bus_error_event) {
        case CXL_ERROR_DETECTED_EVENT:
            afu_dev->error_state = state;

            if (err_handler &&
                err_handler->error_detected)
                err_handler->error_detected(afu_dev, state);
            break;
        case CXL_SLOT_RESET_EVENT:
            afu_dev->error_state = state;

            if (err_handler &&
                err_handler->slot_reset)
                err_handler->slot_reset(afu_dev);
            break;
        case CXL_RESUME_EVENT:
            if (err_handler &&
                err_handler->resume)
                err_handler->resume(afu_dev);
            break;
        }
    }
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
                    u64 errstat)
{
    pr_devel("in %s\n", __func__);
    dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

    return cxl_ops->ack_irq(ctx, 0, errstat);
}

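/*
 * Collect VPD for either the adapter or a single AFU (exactly one of
 * the two arguments is expected to be non-NULL). The data is gathered
 * through a scatter/gather list of page-sized buffers handed to the
 * hypervisor, then copied back into the caller's buffer.
 */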
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
            void *buf, size_t len)
{
    unsigned int entries, mod;
    unsigned long **vpd_buf = NULL;
    struct sg_list *le;
    int rc = 0, i, tocopy;
    u64 out = 0;

    if (buf == NULL)
        return -EINVAL;

    /* number of entries in the list */
    entries = len / SG_BUFFER_SIZE;
    mod = len % SG_BUFFER_SIZE;
    if (mod)
        entries++;

    if (entries > SG_MAX_ENTRIES) {
        entries = SG_MAX_ENTRIES;
        len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
        mod = 0;
    }

    vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
    if (!vpd_buf)
        return -ENOMEM;

    le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
    if (!le) {
        rc = -ENOMEM;
        goto err1;
    }

    for (i = 0; i < entries; i++) {
        vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
        if (!vpd_buf[i]) {
            rc = -ENOMEM;
            goto err2;
        }
        le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
        le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
        if ((i == (entries - 1)) && mod)
            le[i].len = cpu_to_be64(mod);
    }

    if (adapter)
        rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
                    virt_to_phys(le), entries, &out);
    else
        rc = cxl_h_collect_vpd(afu->guest->handle, 0,
                virt_to_phys(le), entries, &out);
    pr_devel("length of available (entries: %i), vpd: %#llx\n",
        entries, out);

    if (!rc) {
        /*
         * hcall returns in 'out' the size of available VPDs.
         * It fills the buffer with as much data as possible.
         */
        if (out < len)
            len = out;
        rc = len;
        if (out) {
            for (i = 0; i < entries; i++) {
                if (len < SG_BUFFER_SIZE)
                    tocopy = len;
                else
                    tocopy = SG_BUFFER_SIZE;
                memcpy(buf, vpd_buf[i], tocopy);
                buf += tocopy;
                len -= tocopy;
            }
        }
    }
err2:
    for (i = 0; i < entries; i++) {
        if (vpd_buf[i])
            free_page((unsigned long) vpd_buf[i]);
    }
    free_page((unsigned long) le);
err1:
    kfree(vpd_buf);
    return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
    return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

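/*
 * PSL interrupt handler: fetch the fault details from the hypervisor
 * and hand them to the common PSL8 interrupt code.
 */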
static irqreturn_t guest_psl_irq(int irq, void *data)
{
    struct cxl_context *ctx = data;
    struct cxl_irq_info irq_info;
    int rc;

    pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
    rc = guest_get_irq_info(ctx, &irq_info);
    if (rc) {
        WARN(1, "Unable to get IRQ info: %i\n", rc);
        return IRQ_HANDLED;
    }

    rc = cxl_irq_psl8(irq, ctx, &irq_info);
    return rc;
}

static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
    u64 state;
    int rc = 0;

    if (!afu)
        return -EIO;

    rc = cxl_h_read_error_state(afu->guest->handle, &state);
    if (!rc) {
        WARN_ON(state != H_STATE_NORMAL &&
            state != H_STATE_DISABLE &&
            state != H_STATE_TEMP_UNAVAILABLE &&
            state != H_STATE_PERM_UNAVAILABLE);
        *state_out = state & 0xffffffff;
    }
    return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
    struct cxl_afu *afu = data;
    int rc;
    u64 serr, afu_error, dsisr;

    rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
    if (rc) {
        dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
        return IRQ_HANDLED;
    }
    afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
    dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
    cxl_afu_decode_psl_serr(afu, serr);
    dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
    dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

    rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
    if (rc)
        dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
            rc);

    return IRQ_HANDLED;
}

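/*
 * Allocate 'len' contiguous hardware IRQs from the first range of the
 * adapter's IRQ pool with enough free bits. Callers hold
 * irq_alloc_lock.
 */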
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
    int i, n;
    struct irq_avail *cur;

    for (i = 0; i < adapter->guest->irq_nranges; i++) {
        cur = &adapter->guest->irq_avail[i];
        n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
                    0, len, 0);
        if (n < cur->range) {
            bitmap_set(cur->bitmap, n, len);
            *irq = cur->offset + n;
            pr_devel("guest: allocate IRQs %#x->%#x\n",
                *irq, *irq + len - 1);

            return 0;
        }
    }
    return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
    int i, n;
    struct irq_avail *cur;

    if (len == 0)
        return -ENOENT;

    for (i = 0; i < adapter->guest->irq_nranges; i++) {
        cur = &adapter->guest->irq_avail[i];
        if (irq >= cur->offset &&
            (irq + len) <= (cur->offset + cur->range)) {
            n = irq - cur->offset;
            bitmap_clear(cur->bitmap, n, len);
            pr_devel("guest: release IRQs %#x->%#x\n",
                irq, irq + len - 1);
            return 0;
        }
    }
    return -ENOENT;
}

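/*
 * Adapter reset: report an error-detected event to the AFU drivers,
 * detach all contexts, reset the adapter through the hypervisor, then
 * (on success) report slot reset and resume events.
 */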
static int guest_reset(struct cxl *adapter)
{
    struct cxl_afu *afu = NULL;
    int i, rc;

    pr_devel("Adapter reset request\n");
    spin_lock(&adapter->afu_list_lock);
    for (i = 0; i < adapter->slices; i++) {
        if ((afu = adapter->afu[i])) {
            pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                    pci_channel_io_frozen);
            cxl_context_detach_all(afu);
        }
    }

    rc = cxl_h_reset_adapter(adapter->guest->handle);
    for (i = 0; i < adapter->slices; i++) {
        if (!rc && (afu = adapter->afu[i])) {
            pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
                    pci_channel_io_normal);
            pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
        }
    }
    spin_unlock(&adapter->afu_list_lock);
    return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
    int irq;

    spin_lock(&adapter->guest->irq_alloc_lock);
    if (irq_alloc_range(adapter, 1, &irq))
        irq = -ENOSPC;
    spin_unlock(&adapter->guest->irq_alloc_lock);
    return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
    spin_lock(&adapter->guest->irq_alloc_lock);
    irq_free_range(adapter, irq, 1);
    spin_unlock(&adapter->guest->irq_alloc_lock);
}

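/*
 * Allocate 'num' IRQs spread over at most CXL_IRQ_RANGES ranges. If no
 * contiguous block of the requested size is available, retry with
 * successively halved sizes until the full count is satisfied, or fail
 * and release whatever was allocated.
 */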
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
                struct cxl *adapter, unsigned int num)
{
    int i, try, irq;

    memset(irqs, 0, sizeof(struct cxl_irq_ranges));

    spin_lock(&adapter->guest->irq_alloc_lock);
    for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
        try = num;
        while (try) {
            if (irq_alloc_range(adapter, try, &irq) == 0)
                break;
            try /= 2;
        }
        if (!try)
            goto error;
        irqs->offset[i] = irq;
        irqs->range[i] = try;
        num -= try;
    }
    if (num)
        goto error;
    spin_unlock(&adapter->guest->irq_alloc_lock);
    return 0;

error:
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
    spin_unlock(&adapter->guest->irq_alloc_lock);
    return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
                struct cxl *adapter)
{
    int i;

    spin_lock(&adapter->guest->irq_alloc_lock);
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
    spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
    afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                      dev_name(&afu->dev));
    if (!afu->err_irq_name)
        return -ENOMEM;

    if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
                 guest_slice_irq_err, afu, afu->err_irq_name))) {
        kfree(afu->err_irq_name);
        afu->err_irq_name = NULL;
        return -ENOMEM;
    }

    return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
    cxl_unmap_irq(afu->serr_virq, afu);
    cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
    kfree(afu->err_irq_name);
}

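/*
 * Acknowledge a fault/interrupt for a context: only the upper 32 bits
 * of the TFC value are passed to the hypervisor, and psl_reset_mask is
 * reduced to a boolean reset/no-reset indication.
 */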
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
    return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
                tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
    irq_hw_number_t hwirq;
    unsigned int virq;
    int r, i;

    pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
    for (r = 0; r < CXL_IRQ_RANGES; r++) {
        hwirq = ctx->irqs.offset[r];
        for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
            virq = irq_find_mapping(NULL, hwirq);
            disable_irq(virq);
        }
    }
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
    irq_hw_number_t hwirq;
    unsigned int virq;
    int r, i;

    pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
    for (r = 0; r < CXL_IRQ_RANGES; r++) {
        hwirq = ctx->irqs.offset[r];
        for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
            virq = irq_find_mapping(NULL, hwirq);
            enable_irq(virq);
        }
    }
}

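/*
 * Read 'sz' bytes from an AFU configuration record. The hypervisor
 * copies the data into a zeroed bounce page, and the result is
 * converted from the record's little-endian layout.
 */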
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
            u64 offset, u64 *val)
{
    unsigned long cr;
    char c;
    int rc = 0;

    if (afu->crs_len < sz)
        return -ENOENT;

    if (unlikely(offset >= afu->crs_len))
        return -ERANGE;

    cr = get_zeroed_page(GFP_KERNEL);
    if (!cr)
        return -ENOMEM;

    rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
            virt_to_phys((void *)cr), sz);
    if (rc)
        goto err;

    switch (sz) {
    case 1:
        c = *((char *) cr);
        *val = c;
        break;
    case 2:
        *val = in_le16((u16 *)cr);
        break;
    case 4:
        *val = in_le32((unsigned *)cr);
        break;
    case 8:
        *val = in_le64((u64 *)cr);
        break;
    default:
        WARN_ON(1);
    }
err:
    free_page(cr);
    return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
            u32 *out)
{
    int rc;
    u64 val;

    rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
    if (!rc)
        *out = (u32) val;
    return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
            u16 *out)
{
    int rc;
    u64 val;

    rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
    if (!rc)
        *out = (u16) val;
    return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
            u8 *out)
{
    int rc;
    u64 val;

    rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
    if (!rc)
        *out = (u8) val;
    return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
            u64 *out)
{
    return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
    /* config record is not writable from guest */
    return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
    /* config record is not writable from guest */
    return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
    /* config record is not writable from guest */
    return -EPERM;
}

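/*
 * Build a process element describing the context (flags, pid, segment
 * table pointers, interrupt assignments) in a zeroed page and hand it
 * to the hypervisor to attach the process to the AFU.
 */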
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
    struct cxl_process_element_hcall *elem;
    struct cxl *adapter = ctx->afu->adapter;
    const struct cred *cred;
    u32 pid, idx;
    int rc, r, i;
    u64 mmio_addr, mmio_size;
    __be64 flags = 0;

    /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
    if (!(elem = (struct cxl_process_element_hcall *)
            get_zeroed_page(GFP_KERNEL)))
        return -ENOMEM;

    elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
    if (ctx->kernel) {
        pid = 0;
        flags |= CXL_PE_TRANSLATION_ENABLED;
        flags |= CXL_PE_PRIVILEGED_PROCESS;
        if (mfmsr() & MSR_SF)
            flags |= CXL_PE_64_BIT;
    } else {
        pid = current->pid;
        flags |= CXL_PE_PROBLEM_STATE;
        flags |= CXL_PE_TRANSLATION_ENABLED;
        if (!test_tsk_thread_flag(current, TIF_32BIT))
            flags |= CXL_PE_64_BIT;
        cred = get_current_cred();
        if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
            flags |= CXL_PE_PRIVILEGED_PROCESS;
        put_cred(cred);
    }
    elem->flags         = cpu_to_be64(flags);
    elem->common.tid    = cpu_to_be32(0); /* Unused */
    elem->common.pid    = cpu_to_be32(pid);
    elem->common.csrp   = cpu_to_be64(0); /* disable */
    elem->common.u.psl8.aurp0  = cpu_to_be64(0); /* disable */
    elem->common.u.psl8.aurp1  = cpu_to_be64(0); /* disable */

    cxl_prefault(ctx, wed);

    elem->common.u.psl8.sstp0  = cpu_to_be64(ctx->sstp0);
    elem->common.u.psl8.sstp1  = cpu_to_be64(ctx->sstp1);

    /*
     * Ensure we have at least one interrupt allocated to take faults for
     * kernel contexts that may not have allocated any AFU IRQs at all:
     */
    if (ctx->irqs.range[0] == 0) {
        rc = afu_register_irqs(ctx, 0);
        if (rc)
            goto out_free;
    }

    for (r = 0; r < CXL_IRQ_RANGES; r++) {
        for (i = 0; i < ctx->irqs.range[r]; i++) {
            if (r == 0 && i == 0) {
                elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
            } else {
                idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
                elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
            }
        }
    }
    elem->common.amr = cpu_to_be64(amr);
    elem->common.wed = cpu_to_be64(wed);

    disable_afu_irqs(ctx);

    rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
                &ctx->process_token, &mmio_addr, &mmio_size);
    if (rc == H_SUCCESS) {
        if (ctx->master || !ctx->afu->pp_psa) {
            ctx->psn_phys = ctx->afu->psn_phys;
            ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
            ctx->psn_phys = mmio_addr;
            ctx->psn_size = mmio_size;
        }
        if (ctx->afu->pp_psa && mmio_size &&
            ctx->afu->pp_size == 0) {
            /*
             * There's no property in the device tree to read the
             * pp_size. We only find out at the 1st attach.
             * Compared to bare-metal, it is too late and we
             * should really lock here. However, on powerVM,
             * pp_size is really only used to display in /sys.
             * Being discussed with pHyp for their next release.
             */
            ctx->afu->pp_size = mmio_size;
        }
        /* from PAPR: process element is bytes 4-7 of process token */
        ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
        pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
            ctx->pe, ctx->external_pe, ctx->psn_size);
        ctx->pe_inserted = true;
        enable_afu_irqs(ctx);
    }

out_free:
    free_page((u64)elem);
    return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
    pr_devel("in %s\n", __func__);

    ctx->kernel = kernel;
    if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
        return attach_afu_directed(ctx, wed, amr);

    /* dedicated mode not supported on FW840 */

    return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
    if (!ctx->pe_inserted)
        return 0;
    if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
        return -1;
    return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
    pr_devel("in %s\n", __func__);
    trace_cxl_detach(ctx);

    if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
        return -EIO;

    if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
        return detach_afu_directed(ctx);

    return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
    struct cxl_afu *afu = to_cxl_afu(dev);

    pr_devel("%s\n", __func__);

    idr_destroy(&afu->contexts_idr);

    kfree(afu->guest);
    kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
    return guest_collect_vpd(NULL, afu, buf, len);
}

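/*
 * Read the AFU error buffer through the hypervisor, bounced through a
 * zeroed page. At most one page is copied back per call.
 */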
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
                    loff_t off, size_t count)
{
    void *tbuf = NULL;
    int rc = 0;

    tbuf = (void *) get_zeroed_page(GFP_KERNEL);
    if (!tbuf)
        return -ENOMEM;

    rc = cxl_h_get_afu_err(afu->guest->handle,
                   off & 0x7,
                   virt_to_phys(tbuf),
                   count);
    if (rc)
        goto err;

    if (count > ERR_BUFF_MAX_COPY_SIZE)
        count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
    memcpy(buf, tbuf, count);
err:
    free_page((u64)tbuf);

    return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
    return 0;
}

static bool guest_support_attributes(const char *attr_name,
                     enum cxl_attrs type)
{
    switch (type) {
    case CXL_ADAPTER_ATTRS:
        if ((strcmp(attr_name, "base_image") == 0) ||
            (strcmp(attr_name, "load_image_on_perst") == 0) ||
            (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
            (strcmp(attr_name, "image_loaded") == 0))
            return false;
        break;
    case CXL_AFU_MASTER_ATTRS:
        if ((strcmp(attr_name, "pp_mmio_off") == 0))
            return false;
        break;
    case CXL_AFU_ATTRS:
        break;
    default:
        break;
    }

    return true;
}

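/*
 * Enable AFU-directed mode and create the master and shared character
 * devices for the slice.
 */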
static int activate_afu_directed(struct cxl_afu *afu)
{
    int rc;

    dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

    afu->current_mode = CXL_MODE_DIRECTED;

    afu->num_procs = afu->max_procs_virtualised;

    if ((rc = cxl_chardev_m_afu_add(afu)))
        return rc;

    if ((rc = cxl_sysfs_afu_m_add(afu)))
        goto err;

    if ((rc = cxl_chardev_s_afu_add(afu)))
        goto err1;

    return 0;
err1:
    cxl_sysfs_afu_m_remove(afu);
err:
    cxl_chardev_afu_remove(afu);
    return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
    if (!mode)
        return 0;
    if (!(mode & afu->modes_supported))
        return -EINVAL;

    if (mode == CXL_MODE_DIRECTED)
        return activate_afu_directed(afu);

    if (mode == CXL_MODE_DEDICATED)
        dev_err(&afu->dev, "Dedicated mode not supported\n");

    return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
    dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

    afu->current_mode = 0;
    afu->num_procs = 0;

    cxl_sysfs_afu_m_remove(afu);
    cxl_chardev_afu_remove(afu);

    cxl_ops->afu_reset(afu);

    return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
    if (!mode)
        return 0;
    if (!(mode & afu->modes_supported))
        return -EINVAL;

    if (mode == CXL_MODE_DIRECTED)
        return deactivate_afu_directed(afu);
    return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
    pr_devel("AFU(%d) reset request\n", afu->slice);
    return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
    if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
        dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
            afu->slice);
        return -ENOMEM;
    }
    return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
    if (afu->p2n_mmio)
        iounmap(afu->p2n_mmio);
}

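/*
 * Poll-driven error handling: compare the AFU error state reported by
 * the hypervisor against the last known state and drive the PCI error
 * callbacks accordingly (reset and recovery on DISABLE, permanent
 * failure on PERM_UNAVAILABLE).
 */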
static int afu_update_state(struct cxl_afu *afu)
{
    int rc, cur_state;

    rc = afu_read_error_state(afu, &cur_state);
    if (rc)
        return rc;

    if (afu->guest->previous_state == cur_state)
        return 0;

    pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

    switch (cur_state) {
    case H_STATE_NORMAL:
        afu->guest->previous_state = cur_state;
        break;

    case H_STATE_DISABLE:
        pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                pci_channel_io_frozen);

        cxl_context_detach_all(afu);
        if ((rc = cxl_ops->afu_reset(afu)))
            pr_devel("reset hcall failed %d\n", rc);

        rc = afu_read_error_state(afu, &cur_state);
        if (!rc && cur_state == H_STATE_NORMAL) {
            pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
                    pci_channel_io_normal);
            pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
        }
        afu->guest->previous_state = 0;
        break;

    case H_STATE_TEMP_UNAVAILABLE:
        afu->guest->previous_state = cur_state;
        break;

    case H_STATE_PERM_UNAVAILABLE:
        dev_err(&afu->dev, "AFU is in permanent error state\n");
        pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
                pci_channel_io_perm_failure);
        afu->guest->previous_state = cur_state;
        break;

    default:
        pr_err("Unexpected AFU(%d) error state: %#x\n",
               afu->slice, cur_state);
        return -EINVAL;
    }

    return rc;
}

static void afu_handle_errstate(struct work_struct *work)
{
    struct cxl_afu_guest *afu_guest =
        container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

    if (!afu_update_state(afu_guest->parent) &&
        afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
        return;

    if (afu_guest->handle_err)
        schedule_delayed_work(&afu_guest->work_err,
                      msecs_to_jiffies(3000));
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
    int state;

    if (afu && (!afu_read_error_state(afu, &state))) {
        if (state == H_STATE_NORMAL)
            return true;
    }

    return false;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
    if (afu->pp_irqs < 0) {
        dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
        return -EINVAL;
    }

    if (afu->max_procs_virtualised < 1) {
        dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
        return -EINVAL;
    }

    return 0;
}

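/*
 * Initialize one AFU slice from its device tree node: reset it, map
 * its MMIO registers, register the slice error interrupt, and start
 * the periodic error-state worker.
 */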
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
    struct cxl_afu *afu;
    bool free = true;
    int rc;

    pr_devel("in %s - AFU(%d)\n", __func__, slice);
    if (!(afu = cxl_alloc_afu(adapter, slice)))
        return -ENOMEM;

    if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
        kfree(afu);
        return -ENOMEM;
    }

    if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
                      adapter->adapter_num,
                      slice)))
        goto err1;

    adapter->slices++;

    if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
        goto err1;

    if ((rc = cxl_ops->afu_reset(afu)))
        goto err1;

    if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
        goto err1;

    if ((rc = afu_properties_look_ok(afu)))
        goto err1;

    if ((rc = guest_map_slice_regs(afu)))
        goto err1;

    if ((rc = guest_register_serr_irq(afu)))
        goto err2;

    /*
     * After we call this function we must not free the afu directly, even
     * if it returns an error!
     */
    if ((rc = cxl_register_afu(afu)))
        goto err_put1;

    if ((rc = cxl_sysfs_afu_add(afu)))
        goto err_put1;

    /*
     * pHyp doesn't expose the programming models supported by the
     * AFU. pHyp currently only supports directed mode. If it adds
     * dedicated mode later, this version of cxl has no way to
     * detect it. So we'll initialize the driver, but the first
     * attach will fail.
     * Being discussed with pHyp to do better (likely new property)
     */
    if (afu->max_procs_virtualised == 1)
        afu->modes_supported = CXL_MODE_DEDICATED;
    else
        afu->modes_supported = CXL_MODE_DIRECTED;

    if ((rc = cxl_afu_select_best_mode(afu)))
        goto err_put2;

    adapter->afu[afu->slice] = afu;

    afu->enabled = true;

    /*
     * wake up the cpu periodically to check the state
     * of the AFU using "afu" stored in the guest structure.
     */
    afu->guest->parent = afu;
    afu->guest->handle_err = true;
    INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
    schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

    if ((rc = cxl_pci_vphb_add(afu)))
        dev_info(&afu->dev, "Can't register vPHB\n");

    return 0;

err_put2:
    cxl_sysfs_afu_remove(afu);
err_put1:
    device_unregister(&afu->dev);
    free = false;
    guest_release_serr_irq(afu);
err2:
    guest_unmap_slice_regs(afu);
err1:
    if (free) {
        kfree(afu->guest);
        kfree(afu);
    }
    return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
    if (!afu)
        return;

    /* flush and stop pending job */
    afu->guest->handle_err = false;
    flush_delayed_work(&afu->guest->work_err);

    cxl_pci_vphb_remove(afu);
    cxl_sysfs_afu_remove(afu);

    spin_lock(&afu->adapter->afu_list_lock);
    afu->adapter->afu[afu->slice] = NULL;
    spin_unlock(&afu->adapter->afu_list_lock);

    cxl_context_detach_all(afu);
    cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
    guest_release_serr_irq(afu);
    guest_unmap_slice_regs(afu);

    device_unregister(&afu->dev);
}

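/* Free the adapter structure, including the guest IRQ range bitmaps. */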
static void free_adapter(struct cxl *adapter)
{
    struct irq_avail *cur;
    int i;

    if (adapter->guest) {
        if (adapter->guest->irq_avail) {
            for (i = 0; i < adapter->guest->irq_nranges; i++) {
                cur = &adapter->guest->irq_avail[i];
                bitmap_free(cur->bitmap);
            }
            kfree(adapter->guest->irq_avail);
        }
        kfree(adapter->guest->status);
        kfree(adapter->guest);
    }
    cxl_remove_adapter_nr(adapter);
    kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
    /* The absence of this property means that the operational
     * status is unknown or okay
     */
    if (strlen(adapter->guest->status) &&
        strcmp(adapter->guest->status, "okay")) {
        pr_err("ABORTING:Bad operational status of the device\n");
        return -EINVAL;
    }

    return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
    return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
    pr_devel("in %s\n", __func__);

    cxl_sysfs_adapter_remove(adapter);

    cxl_guest_remove_chardev(adapter);
    device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
    free_adapter(to_cxl_adapter(dev));
}

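/*
 * Probe an adapter from its device tree node: read its handle and
 * properties, create the character device, and register it with the
 * core.
 */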
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
    struct cxl *adapter;
    bool free = true;
    int rc;

    if (!(adapter = cxl_alloc_adapter()))
        return ERR_PTR(-ENOMEM);

    if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
        free_adapter(adapter);
        return ERR_PTR(-ENOMEM);
    }

    adapter->slices = 0;
    adapter->guest->pdev = pdev;
    adapter->dev.parent = &pdev->dev;
    adapter->dev.release = release_adapter;
    dev_set_drvdata(&pdev->dev, adapter);

    /*
     * Hypervisor controls PSL timebase initialization (p1 register).
     * On FW840, PSL is initialized.
     */
    adapter->psl_timebase_synced = true;

    if ((rc = cxl_of_read_adapter_handle(adapter, np)))
        goto err1;

    if ((rc = cxl_of_read_adapter_properties(adapter, np)))
        goto err1;

    if ((rc = properties_look_ok(adapter)))
        goto err1;

    if ((rc = cxl_guest_add_chardev(adapter)))
        goto err1;

    /*
     * After we call this function we must not free the adapter directly,
     * even if it returns an error!
     */
    if ((rc = cxl_register_adapter(adapter)))
        goto err_put1;

    if ((rc = cxl_sysfs_adapter_add(adapter)))
        goto err_put1;

    /* release the context lock as the adapter is configured */
    cxl_adapter_context_unlock(adapter);

    return adapter;

err_put1:
    device_unregister(&adapter->dev);
    free = false;
    cxl_guest_remove_chardev(adapter);
err1:
    if (free)
        free_adapter(adapter);
    return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
    struct platform_device *pdev;

    pdev = adapter->guest->pdev;
    cxl_guest_remove_adapter(adapter);

    cxl_of_probe(pdev);
}

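/*
 * Backend operations for the guest (PowerVM) implementation, backed by
 * hcalls. Operations not implemented for guests (e.g. setup_irq,
 * update_ivtes) are left NULL.
 */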
const struct cxl_backend_ops cxl_guest_ops = {
    .module = THIS_MODULE,
    .adapter_reset = guest_reset,
    .alloc_one_irq = guest_alloc_one_irq,
    .release_one_irq = guest_release_one_irq,
    .alloc_irq_ranges = guest_alloc_irq_ranges,
    .release_irq_ranges = guest_release_irq_ranges,
    .setup_irq = NULL,
    .handle_psl_slice_error = guest_handle_psl_slice_error,
    .psl_interrupt = guest_psl_irq,
    .ack_irq = guest_ack_irq,
    .attach_process = guest_attach_process,
    .detach_process = guest_detach_process,
    .update_ivtes = NULL,
    .support_attributes = guest_support_attributes,
    .link_ok = guest_link_ok,
    .release_afu = guest_release_afu,
    .afu_read_err_buffer = guest_afu_read_err_buffer,
    .afu_check_and_enable = guest_afu_check_and_enable,
    .afu_activate_mode = guest_afu_activate_mode,
    .afu_deactivate_mode = guest_afu_deactivate_mode,
    .afu_reset = guest_afu_reset,
    .afu_cr_read8 = guest_afu_cr_read8,
    .afu_cr_read16 = guest_afu_cr_read16,
    .afu_cr_read32 = guest_afu_cr_read32,
    .afu_cr_read64 = guest_afu_cr_read64,
    .afu_cr_write8 = guest_afu_cr_write8,
    .afu_cr_write16 = guest_afu_cr_write16,
    .afu_cr_write32 = guest_afu_cr_write32,
    .read_adapter_vpd = cxl_guest_read_adapter_vpd,
};