0001
0002
0003
0004
0005
0006 #include <linux/pci.h>
0007 #include <linux/slab.h>
0008 #include <linux/file.h>
0009 #include <misc/cxl.h>
0010 #include <linux/module.h>
0011 #include <linux/mount.h>
0012 #include <linux/pseudo_fs.h>
0013 #include <linux/sched/mm.h>
0014 #include <linux/mmu_context.h>
0015 #include <linux/irqdomain.h>
0016
0017 #include "cxl.h"
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
/* Magic number identifying the cxl pseudo filesystem superblock */
#define CXL_PSEUDO_FS_MAGIC 0x1697697f

/*
 * Pin count and mount of the pseudo filesystem backing kernel-API
 * context files; managed via simple_pin_fs()/simple_release_fs().
 */
static int cxl_fs_cnt;
static struct vfsmount *cxl_vfs_mount;
0037
/*
 * Set up the filesystem context for the "cxl" pseudo filesystem.
 * init_pseudo() returns NULL on allocation failure, hence -ENOMEM.
 */
static int cxl_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
}
0042
/* Pseudo filesystem hosting the anonymous files handed out by cxl_getfile() */
static struct file_system_type cxl_fs_type = {
	.name		= "cxl",
	.owner		= THIS_MODULE,
	.init_fs_context = cxl_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
0049
0050
/*
 * Drop the pseudo-fs pin taken in cxl_getfile() when tearing down a
 * kernel-API context that was given a file mapping.
 */
void cxl_release_mapping(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
}
0056
/*
 * Create an anonymous file on the cxl pseudo filesystem (strongly
 * inspired by anon_inode_getfile()).
 *
 * On success the returned file holds a pin on the pseudo filesystem
 * and a reference on fops->owner; both are released on the error
 * paths below (the fs pin is otherwise dropped via
 * cxl_release_mapping()).  Returns an ERR_PTR on failure.
 */
static struct file *cxl_getfile(const char *name,
				const struct file_operations *fops,
				void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	/* pin the module implementing the fops for the file's lifetime */
	if (fops->owner && !try_module_get(fops->owner))
		return ERR_PTR(-ENOENT);

	rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
		file = ERR_PTR(rc);
		goto err_module;
	}

	inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		file = ERR_CAST(inode);
		goto err_fs;
	}

	/* only access-mode and non-blocking flags are honoured */
	file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file))
		goto err_inode;

	file->private_data = priv;

	return file;

err_inode:
	iput(inode);
err_fs:
	simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
err_module:
	module_put(fops->owner);
	return file;
}
0100
/*
 * Allocate and initialise a kernel-API context on the AFU associated
 * with @dev.  Returns an ERR_PTR on failure; release with
 * cxl_release_context().
 */
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return ERR_CAST(afu);

	ctx = cxl_context_alloc();
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	/* mark as an in-kernel API context (affects mapping teardown) */
	ctx->kernelapi = true;

	/* third argument false: presumably not a master context — see
	 * cxl_context_init(); cxl_set_master() can promote it later */
	rc = cxl_context_init(ctx, afu, false);
	if (rc)
		goto err_ctx;

	return ctx;

err_ctx:
	kfree(ctx);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
0129
/*
 * Return the context recorded against the device in its powerpc
 * archdata (may be NULL if none has been attached).
 */
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);
0135
/*
 * Free a context obtained from cxl_dev_context_init().  Fails with
 * -EBUSY while the context is still started — callers must stop it
 * first (cxl_stop_context()).
 */
int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);
0146
0147 static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
0148 {
0149 __u16 range;
0150 int r;
0151
0152 for (r = 0; r < CXL_IRQ_RANGES; r++) {
0153 range = ctx->irqs.range[r];
0154 if (num < range) {
0155 return ctx->irqs.offset[r] + num;
0156 }
0157 num -= range;
0158 }
0159 return 0;
0160 }
0161
0162
0163 int cxl_set_priv(struct cxl_context *ctx, void *priv)
0164 {
0165 if (!ctx)
0166 return -EINVAL;
0167
0168 ctx->priv = priv;
0169
0170 return 0;
0171 }
0172 EXPORT_SYMBOL_GPL(cxl_set_priv);
0173
0174 void *cxl_get_priv(struct cxl_context *ctx)
0175 {
0176 if (!ctx)
0177 return ERR_PTR(-EINVAL);
0178
0179 return ctx->priv;
0180 }
0181 EXPORT_SYMBOL_GPL(cxl_get_priv);
0182
/*
 * Allocate @num AFU interrupts for the context; @num == 0 means the
 * AFU's default per-process count (pp_irqs).
 */
int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Without HV mode (i.e. running as a guest — TODO confirm)
		 * the PSL interrupt is not multiplexed: it was allocated
		 * above as irq 0, and we must install its handler here.
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		/* context already running: push the new IVTEs to hardware
		 * where the backend supports it */
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
0212
/*
 * Release the AFU interrupts allocated by cxl_allocate_afu_irqs().
 * Mirrors the allocation path: without HV mode the PSL handler that
 * was mapped on irq 0 is unmapped first.
 */
void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			/* NULL domain: looks up the default irq domain —
			 * TODO confirm that is intended here */
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
0230
/*
 * Register @handler for the context's AFU interrupt number @num.
 * Returns -ENOENT if @num does not correspond to an allocated irq.
 */
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/* Find the hardware irq we are to register */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
0246
/*
 * Undo cxl_map_afu_irq() for AFU interrupt number @num; silently does
 * nothing when @num is unallocated or unmapped.
 */
void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	/* NULL domain: default irq domain lookup — TODO confirm intended */
	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
0261
0262
0263
0264
0265
/*
 * Start a context on the work element descriptor @wed.  With a NULL
 * @task the context is attached as a kernel context; otherwise it is
 * bound to the task's pid and mm.  Idempotent: returns 0 if already
 * started.
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already attached, nothing to do */

	/* take an adapter context reference before attaching; rolled back
	 * on attach failure below */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc)
		goto out;

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		kernel = false;

		/* NOTE(review): takes current's mm even though @task was
		 * supplied — presumably callers pass current; confirm */
		ctx->mm = get_task_mm(current);

		/* take an mm_count reference so the mm_struct stays alive */
		cxl_context_mm_count_get(ctx);

		if (ctx->mm) {
			/* drop the mm_users reference from get_task_mm();
			 * the mm_count reference above keeps the struct */
			mmput(ctx->mm);

			/* register this mm as having a coprocessor user */
			mm_context_add_copro(ctx->mm);
		}
	}

	/* increment the global driver context count */
	cxl_ctx_get();

	/* make the above state visible before hardware attach —
	 * NOTE(review): presumably pairs with a barrier on the interrupt
	 * or attach path; confirm */
	smp_mb();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		/* roll back everything taken above, in reverse */
		put_pid(ctx->pid);
		ctx->pid = NULL;
		cxl_adapter_context_put(ctx->afu->adapter);
		cxl_ctx_put();
		if (task) {
			cxl_context_mm_count_put(ctx);
			if (ctx->mm)
				mm_context_remove_copro(ctx->mm);
		}
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
0332
/* Return the externally visible process element id for this context */
int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
0338
0339
/* Stop a context; delegates entirely to __detach_context() */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);
0345
/* Flag the context as a master context (must be set before starting it —
 * TODO confirm against cxl_context_init()/attach path) */
void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);
0351
0352
/*
 * Exported wrappers around the native afu file operations, so AFU
 * drivers supplying a partial file_operations to cxl_get_fd() can
 * delegate to the default cxl behaviour.
 */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);
0384
/* Fill in a fop left NULL by the caller with the default from afu_fops */
#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
0386
0387
/*
 * Create a file (and reserve an fd number, returned via @fd) for a
 * context.  A caller-supplied @fops has any NULL entries patched with
 * the default afu implementations; a NULL @fops uses afu_fops wholesale.
 * Only one file per context is allowed (guarded by ctx->mapping).
 *
 * NOTE(review): the err_fd path returns NULL while earlier failures
 * return ERR_PTR() values — callers must handle both; confirm.
 */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;
	char *name = NULL;

	/* only allow one fd per context */
	if (ctx->mapping)
		return ERR_PTR(-EEXIST);

	flags = O_RDWR | O_CLOEXEC;

	/* reserve the fd number first (similar to anon_inode_getfd());
	 * the caller is expected to fd_install() the returned file */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/* patch missing fops with the defaults */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else
		fops = (struct file_operations *)&afu_fops;

	/* NOTE(review): kasprintf() may return NULL on allocation failure
	 * and the result is passed on unchecked — confirm cxl_getfile()
	 * and alloc_file_pseudo() tolerate a NULL name */
	name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
	file = cxl_getfile(name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file))
		goto err_fd;

	cxl_context_set_mapping(ctx, file->f_mapping);
	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
0436
/* Recover the context stored in file->private_data by cxl_getfile() */
struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
0442
/*
 * Install the AFU driver's event callbacks on the context and reset
 * the pending-event counter.  Both fetch_event and event_delivered
 * are required (WARNs otherwise).
 */
void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
0451
/*
 * Account @new_events driver events against the context and wake any
 * waiters on its wait queue (e.g. poll/read paths).
 */
void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
0459
/*
 * Register interrupts for, then start, a context using the parameters
 * in @work.  On start failure the registered irqs are released again.
 */
int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* default the irq count to the AFU's pp_irqs if not specified,
	 * otherwise bound it to [pp_irqs, irqs_max] */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);
0486
/*
 * ioremap() the context's problem state area.  Only valid once the
 * context has started; returns NULL otherwise.  Unmap with
 * cxl_psa_unmap().
 */
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys%llx size:%llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);
0497
/* Release a mapping obtained from cxl_psa_map() */
void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
0503
/*
 * Reset the context's AFU and then re-check/re-enable it.  Returns the
 * first backend error encountered.
 */
int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);
0516
/*
 * Record on the adapter whether a PERST reloads the same FPGA image
 * (consumed elsewhere by the reset handling — TODO confirm consumer).
 */
void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
0523
/*
 * Read up to @count bytes of the adapter's VPD into @buf.  Returns the
 * number of bytes read, or -ENODEV if @dev has no AFU.
 */
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return -ENODEV;

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);