Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * SPU file system -- file contents
0004  *
0005  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
0006  *
0007  * Author: Arnd Bergmann <arndb@de.ibm.com>
0008  */
0009 
0010 #undef DEBUG
0011 
0012 #include <linux/coredump.h>
0013 #include <linux/fs.h>
0014 #include <linux/ioctl.h>
0015 #include <linux/export.h>
0016 #include <linux/pagemap.h>
0017 #include <linux/poll.h>
0018 #include <linux/ptrace.h>
0019 #include <linux/seq_file.h>
0020 #include <linux/slab.h>
0021 
0022 #include <asm/io.h>
0023 #include <asm/time.h>
0024 #include <asm/spu.h>
0025 #include <asm/spu_info.h>
0026 #include <linux/uaccess.h>
0027 
0028 #include "spufs.h"
0029 #include "sputrace.h"
0030 
0031 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
0032 
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read accessor; NULL means write-only */
	int (*set)(void *, u64);	/* write accessor; NULL means read-only */
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];	/* staging buffer for user-supplied text */
	void *data;		/* opaque cookie handed to get/set */
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
0043 
0044 static int spufs_attr_open(struct inode *inode, struct file *file,
0045         int (*get)(void *, u64 *), int (*set)(void *, u64),
0046         const char *fmt)
0047 {
0048     struct spufs_attr *attr;
0049 
0050     attr = kmalloc(sizeof(*attr), GFP_KERNEL);
0051     if (!attr)
0052         return -ENOMEM;
0053 
0054     attr->get = get;
0055     attr->set = set;
0056     attr->data = inode->i_private;
0057     attr->fmt = fmt;
0058     mutex_init(&attr->mutex);
0059     file->private_data = attr;
0060 
0061     return nonseekable_open(inode, file);
0062 }
0063 
0064 static int spufs_attr_release(struct inode *inode, struct file *file)
0065 {
0066        kfree(file->private_data);
0067     return 0;
0068 }
0069 
0070 static ssize_t spufs_attr_read(struct file *file, char __user *buf,
0071         size_t len, loff_t *ppos)
0072 {
0073     struct spufs_attr *attr;
0074     size_t size;
0075     ssize_t ret;
0076 
0077     attr = file->private_data;
0078     if (!attr->get)
0079         return -EACCES;
0080 
0081     ret = mutex_lock_interruptible(&attr->mutex);
0082     if (ret)
0083         return ret;
0084 
0085     if (*ppos) {        /* continued read */
0086         size = strlen(attr->get_buf);
0087     } else {        /* first read */
0088         u64 val;
0089         ret = attr->get(attr->data, &val);
0090         if (ret)
0091             goto out;
0092 
0093         size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
0094                  attr->fmt, (unsigned long long)val);
0095     }
0096 
0097     ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
0098 out:
0099     mutex_unlock(&attr->mutex);
0100     return ret;
0101 }
0102 
0103 static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
0104         size_t len, loff_t *ppos)
0105 {
0106     struct spufs_attr *attr;
0107     u64 val;
0108     size_t size;
0109     ssize_t ret;
0110 
0111     attr = file->private_data;
0112     if (!attr->set)
0113         return -EACCES;
0114 
0115     ret = mutex_lock_interruptible(&attr->mutex);
0116     if (ret)
0117         return ret;
0118 
0119     ret = -EFAULT;
0120     size = min(sizeof(attr->set_buf) - 1, len);
0121     if (copy_from_user(attr->set_buf, buf, size))
0122         goto out;
0123 
0124     ret = len; /* claim we got the whole input */
0125     attr->set_buf[size] = '\0';
0126     val = simple_strtol(attr->set_buf, NULL, 0);
0127     attr->set(attr->data, val);
0128 out:
0129     mutex_unlock(&attr->mutex);
0130     return ret;
0131 }
0132 
0133 static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,
0134         size_t size)
0135 {
0136     if (!dump_emit(cprm, buf, size))
0137         return -EIO;
0138     return size;
0139 }
0140 
/*
 * Declare a file_operations instance (__fops) backed by the spufs_attr
 * helpers above: __get/__set are the typed accessors, __fmt the printf
 * format used to render the value on read.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
0154 
0155 
0156 static int
0157 spufs_mem_open(struct inode *inode, struct file *file)
0158 {
0159     struct spufs_inode_info *i = SPUFS_I(inode);
0160     struct spu_context *ctx = i->i_ctx;
0161 
0162     mutex_lock(&ctx->mapping_lock);
0163     file->private_data = ctx;
0164     if (!i->i_openers++)
0165         ctx->local_store = inode->i_mapping;
0166     mutex_unlock(&ctx->mapping_lock);
0167     return 0;
0168 }
0169 
0170 static int
0171 spufs_mem_release(struct inode *inode, struct file *file)
0172 {
0173     struct spufs_inode_info *i = SPUFS_I(inode);
0174     struct spu_context *ctx = i->i_ctx;
0175 
0176     mutex_lock(&ctx->mapping_lock);
0177     if (!--i->i_openers)
0178         ctx->local_store = NULL;
0179     mutex_unlock(&ctx->mapping_lock);
0180     return 0;
0181 }
0182 
0183 static ssize_t
0184 spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)
0185 {
0186     return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);
0187 }
0188 
0189 static ssize_t
0190 spufs_mem_read(struct file *file, char __user *buffer,
0191                 size_t size, loff_t *pos)
0192 {
0193     struct spu_context *ctx = file->private_data;
0194     ssize_t ret;
0195 
0196     ret = spu_acquire(ctx);
0197     if (ret)
0198         return ret;
0199     ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),
0200                       LS_SIZE);
0201     spu_release(ctx);
0202 
0203     return ret;
0204 }
0205 
0206 static ssize_t
0207 spufs_mem_write(struct file *file, const char __user *buffer,
0208                     size_t size, loff_t *ppos)
0209 {
0210     struct spu_context *ctx = file->private_data;
0211     char *local_store;
0212     loff_t pos = *ppos;
0213     int ret;
0214 
0215     if (pos > LS_SIZE)
0216         return -EFBIG;
0217 
0218     ret = spu_acquire(ctx);
0219     if (ret)
0220         return ret;
0221 
0222     local_store = ctx->ops->get_ls(ctx);
0223     size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
0224     spu_release(ctx);
0225 
0226     return size;
0227 }
0228 
/*
 * Page fault handler for mmap'ed local store.  Depending on whether
 * the context is currently saved or running, the faulting page is
 * backed either by the vmalloc'ed CSA copy (cached) or by the SPU's
 * physical local store (non-cached, write-combining).
 */
static vm_fault_t
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	/* could not take the context lock: have the caller retry the fault */
	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved context: map the vmalloc'ed CSA local-store image */
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* running context: map the physical local store directly */
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return ret;
}
0260 
0261 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
0262                 unsigned long address,
0263                 void *buf, int len, int write)
0264 {
0265     struct spu_context *ctx = vma->vm_file->private_data;
0266     unsigned long offset = address - vma->vm_start;
0267     char *local_store;
0268 
0269     if (write && !(vma->vm_flags & VM_WRITE))
0270         return -EACCES;
0271     if (spu_acquire(ctx))
0272         return -EINTR;
0273     if ((offset + len) > vma->vm_end)
0274         len = vma->vm_end - offset;
0275     local_store = ctx->ops->get_ls(ctx);
0276     if (write)
0277         memcpy_toio(local_store + offset, buf, len);
0278     else
0279         memcpy_fromio(buf, local_store + offset, len);
0280     spu_release(ctx);
0281     return len;
0282 }
0283 
/* VM callbacks for mmap of the local store ("mem" file). */
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};
0288 
0289 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
0290 {
0291     if (!(vma->vm_flags & VM_SHARED))
0292         return -EINVAL;
0293 
0294     vma->vm_flags |= VM_IO | VM_PFNMAP;
0295     vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
0296 
0297     vma->vm_ops = &spufs_mem_mmap_vmops;
0298     return 0;
0299 }
0300 
/* File operations for the "mem" (local store) file. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
0309 
/*
 * Common fault handler for mmap'ed problem-state areas.  The page can
 * only be inserted while the context is loaded on an SPU; if it is
 * saved, we sleep (with the mmap_lock dropped) until it becomes
 * runnable and have the caller retry the fault.
 */
static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
					unsigned long ps_offs,
					unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int err = 0;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_lock, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_lock held.
	 * It is possible to drop the mmap_lock here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		/* sleep until loaded; run_wq is woken on state changes */
		mmap_read_unlock(current->mm);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		mmap_read_lock(current->mm);
	} else {
		/* loaded: insert the problem-state page directly */
		area = ctx->spu->problem_phys + ps_offs;
		ret = vmf_insert_pfn(vmf->vma, vmf->address,
				(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/*
	 * NOTE(review): spu_release() is skipped when err is set; this
	 * assumes spufs_wait() already dropped the context lock on
	 * failure -- confirm against its definition in spufs.h.
	 */
	if (!err)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return ret;
}
0365 
0366 #if SPUFS_MMAP_4K
/* Fault handler for the control area mapping (offset 0x4000). */
static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}
0371 
/* VM callbacks for mmap of the "cntl" file. */
static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};
0375 
0376 /*
0377  * mmap support for problem state control area [0x4000 - 0x4fff].
0378  */
0379 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
0380 {
0381     if (!(vma->vm_flags & VM_SHARED))
0382         return -EINVAL;
0383 
0384     vma->vm_flags |= VM_IO | VM_PFNMAP;
0385     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
0386 
0387     vma->vm_ops = &spufs_cntl_mmap_vmops;
0388     return 0;
0389 }
0390 #else /* SPUFS_MMAP_4K */
0391 #define spufs_cntl_mmap NULL
0392 #endif /* !SPUFS_MMAP_4K */
0393 
0394 static int spufs_cntl_get(void *data, u64 *val)
0395 {
0396     struct spu_context *ctx = data;
0397     int ret;
0398 
0399     ret = spu_acquire(ctx);
0400     if (ret)
0401         return ret;
0402     *val = ctx->ops->status_read(ctx);
0403     spu_release(ctx);
0404 
0405     return 0;
0406 }
0407 
0408 static int spufs_cntl_set(void *data, u64 val)
0409 {
0410     struct spu_context *ctx = data;
0411     int ret;
0412 
0413     ret = spu_acquire(ctx);
0414     if (ret)
0415         return ret;
0416     ctx->ops->runcntl_write(ctx, val);
0417     spu_release(ctx);
0418 
0419     return 0;
0420 }
0421 
0422 static int spufs_cntl_open(struct inode *inode, struct file *file)
0423 {
0424     struct spufs_inode_info *i = SPUFS_I(inode);
0425     struct spu_context *ctx = i->i_ctx;
0426 
0427     mutex_lock(&ctx->mapping_lock);
0428     file->private_data = ctx;
0429     if (!i->i_openers++)
0430         ctx->cntl = inode->i_mapping;
0431     mutex_unlock(&ctx->mapping_lock);
0432     return simple_attr_open(inode, file, spufs_cntl_get,
0433                     spufs_cntl_set, "0x%08lx");
0434 }
0435 
0436 static int
0437 spufs_cntl_release(struct inode *inode, struct file *file)
0438 {
0439     struct spufs_inode_info *i = SPUFS_I(inode);
0440     struct spu_context *ctx = i->i_ctx;
0441 
0442     simple_attr_release(inode, file);
0443 
0444     mutex_lock(&ctx->mapping_lock);
0445     if (!--i->i_openers)
0446         ctx->cntl = NULL;
0447     mutex_unlock(&ctx->mapping_lock);
0448     return 0;
0449 }
0450 
/* File operations for the "cntl" (run control / status) file. */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek = no_llseek,
	.mmap = spufs_cntl_mmap,
};
0459 
0460 static int
0461 spufs_regs_open(struct inode *inode, struct file *file)
0462 {
0463     struct spufs_inode_info *i = SPUFS_I(inode);
0464     file->private_data = i->i_ctx;
0465     return 0;
0466 }
0467 
0468 static ssize_t
0469 spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)
0470 {
0471     return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,
0472                    sizeof(ctx->csa.lscsa->gprs));
0473 }
0474 
0475 static ssize_t
0476 spufs_regs_read(struct file *file, char __user *buffer,
0477         size_t size, loff_t *pos)
0478 {
0479     int ret;
0480     struct spu_context *ctx = file->private_data;
0481 
0482     /* pre-check for file position: if we'd return EOF, there's no point
0483      * causing a deschedule */
0484     if (*pos >= sizeof(ctx->csa.lscsa->gprs))
0485         return 0;
0486 
0487     ret = spu_acquire_saved(ctx);
0488     if (ret)
0489         return ret;
0490     ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs,
0491                       sizeof(ctx->csa.lscsa->gprs));
0492     spu_release_saved(ctx);
0493     return ret;
0494 }
0495 
0496 static ssize_t
0497 spufs_regs_write(struct file *file, const char __user *buffer,
0498          size_t size, loff_t *pos)
0499 {
0500     struct spu_context *ctx = file->private_data;
0501     struct spu_lscsa *lscsa = ctx->csa.lscsa;
0502     int ret;
0503 
0504     if (*pos >= sizeof(lscsa->gprs))
0505         return -EFBIG;
0506 
0507     ret = spu_acquire_saved(ctx);
0508     if (ret)
0509         return ret;
0510 
0511     size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
0512                     buffer, size);
0513 
0514     spu_release_saved(ctx);
0515     return size;
0516 }
0517 
/* File operations for the "regs" (GPR image) file. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
0524 
0525 static ssize_t
0526 spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)
0527 {
0528     return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,
0529                    sizeof(ctx->csa.lscsa->fpcr));
0530 }
0531 
0532 static ssize_t
0533 spufs_fpcr_read(struct file *file, char __user * buffer,
0534         size_t size, loff_t * pos)
0535 {
0536     int ret;
0537     struct spu_context *ctx = file->private_data;
0538 
0539     ret = spu_acquire_saved(ctx);
0540     if (ret)
0541         return ret;
0542     ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,
0543                       sizeof(ctx->csa.lscsa->fpcr));
0544     spu_release_saved(ctx);
0545     return ret;
0546 }
0547 
0548 static ssize_t
0549 spufs_fpcr_write(struct file *file, const char __user * buffer,
0550          size_t size, loff_t * pos)
0551 {
0552     struct spu_context *ctx = file->private_data;
0553     struct spu_lscsa *lscsa = ctx->csa.lscsa;
0554     int ret;
0555 
0556     if (*pos >= sizeof(lscsa->fpcr))
0557         return -EFBIG;
0558 
0559     ret = spu_acquire_saved(ctx);
0560     if (ret)
0561         return ret;
0562 
0563     size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
0564                     buffer, size);
0565 
0566     spu_release_saved(ctx);
0567     return size;
0568 }
0569 
/* File operations for the "fpcr" file; open is shared with "regs". */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
0576 
0577 /* generic open function for all pipe-like files */
0578 static int spufs_pipe_open(struct inode *inode, struct file *file)
0579 {
0580     struct spufs_inode_info *i = SPUFS_I(inode);
0581     file->private_data = i->i_ctx;
0582 
0583     return stream_open(inode, file);
0584 }
0585 
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	/* mailbox entries are 32 bits wide; refuse shorter reads */
	if (len < 4)
		return -EINVAL;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;	/* mailbox empty */

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	/* no entries were available at all */
	if (!count)
		count = -EAGAIN;

	return count;
}
0633 
/* File operations for the "mbox" (SPU-to-PPU mailbox) file. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek = no_llseek,
};
0639 
0640 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
0641             size_t len, loff_t *pos)
0642 {
0643     struct spu_context *ctx = file->private_data;
0644     ssize_t ret;
0645     u32 mbox_stat;
0646 
0647     if (len < 4)
0648         return -EINVAL;
0649 
0650     ret = spu_acquire(ctx);
0651     if (ret)
0652         return ret;
0653 
0654     mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
0655 
0656     spu_release(ctx);
0657 
0658     if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
0659         return -EFAULT;
0660 
0661     return 4;
0662 }
0663 
/* File operations for the "mbox_stat" file. */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
0669 
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	/* thin wrapper around the backend's interrupt-mailbox read */
	return ctx->ops->ibox_read(ctx, data);
}
0675 
0676 /* interrupt-level ibox callback function. */
0677 void spufs_ibox_callback(struct spu *spu)
0678 {
0679     struct spu_context *ctx = spu->ctx;
0680 
0681     if (ctx)
0682         wake_up_all(&ctx->ibox_wq);
0683 }
0684 
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		/* non-blocking: fail immediately if nothing is pending */
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		/*
		 * NOTE(review): on error we skip spu_release(); this assumes
		 * spufs_wait() has already dropped the context lock when it
		 * fails -- confirm against its definition in spufs.h.
		 */
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	/* drain any further entries without blocking */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
0749 
/* Poll handler for "ibox": readable when the interrupt mailbox has data. */
static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
	/* NOTE(review): spu_release() is presumed to pair with the
	 * mutex_lock() above -- confirm against spufs.h. */
	spu_release(ctx);

	return mask;
}
0767 
/* File operations for the "ibox" (interrupt mailbox) file. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.llseek = no_llseek,
};
0774 
0775 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
0776             size_t len, loff_t *pos)
0777 {
0778     struct spu_context *ctx = file->private_data;
0779     ssize_t ret;
0780     u32 ibox_stat;
0781 
0782     if (len < 4)
0783         return -EINVAL;
0784 
0785     ret = spu_acquire(ctx);
0786     if (ret)
0787         return ret;
0788     ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
0789     spu_release(ctx);
0790 
0791     if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
0792         return -EFAULT;
0793 
0794     return 4;
0795 }
0796 
/* File operations for the "ibox_stat" file. */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};
0802 
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	/* thin wrapper around the backend's outbound-mailbox write */
	return ctx->ops->wbox_write(ctx, data);
}
0808 
0809 /* interrupt-level wbox callback function. */
0810 void spufs_wbox_callback(struct spu *spu)
0811 {
0812     struct spu_context *ctx = spu->ctx;
0813 
0814     if (ctx)
0815         wake_up_all(&ctx->wbox_wq);
0816 }
0817 
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	/* fetch the first word before taking the context lock */
	if (get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		/*
		 * NOTE(review): on error we skip spu_release(); this assumes
		 * spufs_wait() has already dropped the context lock when it
		 * fails -- confirm against its definition in spufs.h.
		 */
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;	/* mailbox full */
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
0881 
/* Poll handler for "wbox": writable when the outbound mailbox has room. */
static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
	/* NOTE(review): spu_release() is presumed to pair with the
	 * mutex_lock() above -- confirm against spufs.h. */
	spu_release(ctx);

	return mask;
}
0899 
/* File operations for the "wbox" (PPU-to-SPU mailbox) file. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.llseek = no_llseek,
};
0906 
0907 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
0908             size_t len, loff_t *pos)
0909 {
0910     struct spu_context *ctx = file->private_data;
0911     ssize_t ret;
0912     u32 wbox_stat;
0913 
0914     if (len < 4)
0915         return -EINVAL;
0916 
0917     ret = spu_acquire(ctx);
0918     if (ret)
0919         return ret;
0920     wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
0921     spu_release(ctx);
0922 
0923     if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
0924         return -EFAULT;
0925 
0926     return 4;
0927 }
0928 
/* File operations for the "wbox_stat" file. */
static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
0934 
0935 static int spufs_signal1_open(struct inode *inode, struct file *file)
0936 {
0937     struct spufs_inode_info *i = SPUFS_I(inode);
0938     struct spu_context *ctx = i->i_ctx;
0939 
0940     mutex_lock(&ctx->mapping_lock);
0941     file->private_data = ctx;
0942     if (!i->i_openers++)
0943         ctx->signal1 = inode->i_mapping;
0944     mutex_unlock(&ctx->mapping_lock);
0945     return nonseekable_open(inode, file);
0946 }
0947 
0948 static int
0949 spufs_signal1_release(struct inode *inode, struct file *file)
0950 {
0951     struct spufs_inode_info *i = SPUFS_I(inode);
0952     struct spu_context *ctx = i->i_ctx;
0953 
0954     mutex_lock(&ctx->mapping_lock);
0955     if (!--i->i_openers)
0956         ctx->signal1 = NULL;
0957     mutex_unlock(&ctx->mapping_lock);
0958     return 0;
0959 }
0960 
0961 static ssize_t spufs_signal1_dump(struct spu_context *ctx,
0962         struct coredump_params *cprm)
0963 {
0964     if (!ctx->csa.spu_chnlcnt_RW[3])
0965         return 0;
0966     return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],
0967                    sizeof(ctx->csa.spu_chnldata_RW[3]));
0968 }
0969 
0970 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
0971             size_t len)
0972 {
0973     if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))
0974         return -EINVAL;
0975     if (!ctx->csa.spu_chnlcnt_RW[3])
0976         return 0;
0977     if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],
0978              sizeof(ctx->csa.spu_chnldata_RW[3])))
0979         return -EFAULT;
0980     return sizeof(ctx->csa.spu_chnldata_RW[3]);
0981 }
0982 
0983 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
0984             size_t len, loff_t *pos)
0985 {
0986     int ret;
0987     struct spu_context *ctx = file->private_data;
0988 
0989     ret = spu_acquire_saved(ctx);
0990     if (ret)
0991         return ret;
0992     ret = __spufs_signal1_read(ctx, buf, len);
0993     spu_release_saved(ctx);
0994 
0995     return ret;
0996 }
0997 
0998 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
0999             size_t len, loff_t *pos)
1000 {
1001     struct spu_context *ctx;
1002     ssize_t ret;
1003     u32 data;
1004 
1005     ctx = file->private_data;
1006 
1007     if (len < 4)
1008         return -EINVAL;
1009 
1010     if (copy_from_user(&data, buf, 4))
1011         return -EFAULT;
1012 
1013     ret = spu_acquire(ctx);
1014     if (ret)
1015         return ret;
1016     ctx->ops->signal1_write(ctx, data);
1017     spu_release(ctx);
1018 
1019     return 4;
1020 }
1021 
/* Fault handler for the signal-1 mapping; offset depends on page size. */
static vm_fault_t
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}
1036 
/* VM callbacks for mmap of the "signal1" file. */
static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};
1040 
1041 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1042 {
1043     if (!(vma->vm_flags & VM_SHARED))
1044         return -EINVAL;
1045 
1046     vma->vm_flags |= VM_IO | VM_PFNMAP;
1047     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1048 
1049     vma->vm_ops = &spufs_signal1_mmap_vmops;
1050     return 0;
1051 }
1052 
/* File operations for the "signal1" file. */
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
1061 
1062 static const struct file_operations spufs_signal1_nosched_fops = {
1063     .open = spufs_signal1_open,
1064     .release = spufs_signal1_release,
1065     .write = spufs_signal1_write,
1066     .mmap = spufs_signal1_mmap,
1067     .llseek = no_llseek,
1068 };
1069 
1070 static int spufs_signal2_open(struct inode *inode, struct file *file)
1071 {
1072     struct spufs_inode_info *i = SPUFS_I(inode);
1073     struct spu_context *ctx = i->i_ctx;
1074 
1075     mutex_lock(&ctx->mapping_lock);
1076     file->private_data = ctx;
1077     if (!i->i_openers++)
1078         ctx->signal2 = inode->i_mapping;
1079     mutex_unlock(&ctx->mapping_lock);
1080     return nonseekable_open(inode, file);
1081 }
1082 
1083 static int
1084 spufs_signal2_release(struct inode *inode, struct file *file)
1085 {
1086     struct spufs_inode_info *i = SPUFS_I(inode);
1087     struct spu_context *ctx = i->i_ctx;
1088 
1089     mutex_lock(&ctx->mapping_lock);
1090     if (!--i->i_openers)
1091         ctx->signal2 = NULL;
1092     mutex_unlock(&ctx->mapping_lock);
1093     return 0;
1094 }
1095 
/*
 * Core-dump helper: emit the pending signal notification 2 value from
 * the saved context.  Channel index 4 holds the signal 2 data here; a
 * zero channel count means nothing is pending and nothing is emitted.
 */
static ssize_t spufs_signal2_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!ctx->csa.spu_chnlcnt_RW[4])
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],
			       sizeof(ctx->csa.spu_chnldata_RW[4]));
}
1104 
/*
 * Copy the pending signal notification 2 value to userspace.  The
 * caller must hold the context in saved state.  Returns 0 (EOF) when
 * no value is pending, -EINVAL for undersized buffers, -EFAULT on a
 * bad user pointer, else the number of bytes copied.
 */
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len)
{
	if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))
		return -EINVAL;
	if (!ctx->csa.spu_chnlcnt_RW[4])
		return 0;
	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],
			 sizeof(ctx->csa.spu_chnldata_RW[4])))
		return -EFAULT;
	return sizeof(ctx->csa.spu_chnldata_RW[4]);
}
1117 
1118 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1119             size_t len, loff_t *pos)
1120 {
1121     struct spu_context *ctx = file->private_data;
1122     int ret;
1123 
1124     ret = spu_acquire_saved(ctx);
1125     if (ret)
1126         return ret;
1127     ret = __spufs_signal2_read(ctx, buf, len);
1128     spu_release_saved(ctx);
1129 
1130     return ret;
1131 }
1132 
1133 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1134             size_t len, loff_t *pos)
1135 {
1136     struct spu_context *ctx;
1137     ssize_t ret;
1138     u32 data;
1139 
1140     ctx = file->private_data;
1141 
1142     if (len < 4)
1143         return -EINVAL;
1144 
1145     if (copy_from_user(&data, buf, 4))
1146         return -EFAULT;
1147 
1148     ret = spu_acquire(ctx);
1149     if (ret)
1150         return ret;
1151     ctx->ops->signal2_write(ctx, data);
1152     spu_release(ctx);
1153 
1154     return 4;
1155 }
1156 
/*
 * On 64k-page kernels the signal2 area is already covered by the
 * signal1 mapping, so a separate mmap is only provided with 4k pages.
 */
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

/* Shared, non-cached hardware mapping — same rules as signal1. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1191 
/* "signal2" file on schedulable contexts: readable, writable, mmapable. */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

/* NOSCHED contexts get the same file minus .read (no saved state to read). */
static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
1208 
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
/* Acquire modes for the generated getter: */
#define SPU_ATTR_NOACQUIRE	0	/* read without taking the context */
#define SPU_ATTR_ACQUIRE	1	/* spu_acquire()/spu_release() around the read */
#define SPU_ATTR_ACQUIRE_SAVED	2	/* force saved state around the read */

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1242 
/*
 * "signal1_type" attribute: selects the signal 1 register update mode
 * via the context ops (value semantics defined by the hardware ops).
 */
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Getter runs under spu_acquire() courtesy of DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1263 
1264 
/* "signal2_type" attribute: same contract as signal1_type, for signal 2. */
static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Getter runs under spu_acquire() courtesy of DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1285 
/* The MSS area is only separately mmapable with 4k pages. */
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1314 
1315 static int spufs_mss_open(struct inode *inode, struct file *file)
1316 {
1317     struct spufs_inode_info *i = SPUFS_I(inode);
1318     struct spu_context *ctx = i->i_ctx;
1319 
1320     file->private_data = i->i_ctx;
1321 
1322     mutex_lock(&ctx->mapping_lock);
1323     if (!i->i_openers++)
1324         ctx->mss = inode->i_mapping;
1325     mutex_unlock(&ctx->mapping_lock);
1326     return nonseekable_open(inode, file);
1327 }
1328 
1329 static int
1330 spufs_mss_release(struct inode *inode, struct file *file)
1331 {
1332     struct spufs_inode_info *i = SPUFS_I(inode);
1333     struct spu_context *ctx = i->i_ctx;
1334 
1335     mutex_lock(&ctx->mapping_lock);
1336     if (!--i->i_openers)
1337         ctx->mss = NULL;
1338     mutex_unlock(&ctx->mapping_lock);
1339     return 0;
1340 }
1341 
/* "mss" file: mmap-only access to the MFC problem-state DMA area. */
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};
1348 
/* Fault handler for the full problem-state mapping (offset 0). */
static vm_fault_t
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};
1358 
/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 * Shared mappings only; hardware-backed, so non-cached VM_IO|VM_PFNMAP.
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
1373 
1374 static int spufs_psmap_open(struct inode *inode, struct file *file)
1375 {
1376     struct spufs_inode_info *i = SPUFS_I(inode);
1377     struct spu_context *ctx = i->i_ctx;
1378 
1379     mutex_lock(&ctx->mapping_lock);
1380     file->private_data = i->i_ctx;
1381     if (!i->i_openers++)
1382         ctx->psmap = inode->i_mapping;
1383     mutex_unlock(&ctx->mapping_lock);
1384     return nonseekable_open(inode, file);
1385 }
1386 
1387 static int
1388 spufs_psmap_release(struct inode *inode, struct file *file)
1389 {
1390     struct spufs_inode_info *i = SPUFS_I(inode);
1391     struct spu_context *ctx = i->i_ctx;
1392 
1393     mutex_lock(&ctx->mapping_lock);
1394     if (!--i->i_openers)
1395         ctx->psmap = NULL;
1396     mutex_unlock(&ctx->mapping_lock);
1397     return 0;
1398 }
1399 
/* "psmap" file: mmap-only access to the whole problem-state area. */
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
1406 
1407 
/* The MFC command area is only separately mmapable with 4k pages. */
#if SPUFS_MMAP_4K
static vm_fault_t
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1436 
/*
 * Open "mfc".  Only the process owning the context may open it (DMA
 * targets its address space), and only one opener is allowed at a
 * time — the i_count check rejects a second open with -EBUSY.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
1456 
1457 static int
1458 spufs_mfc_release(struct inode *inode, struct file *file)
1459 {
1460     struct spufs_inode_info *i = SPUFS_I(inode);
1461     struct spu_context *ctx = i->i_ctx;
1462 
1463     mutex_lock(&ctx->mapping_lock);
1464     if (!--i->i_openers)
1465         ctx->mfc = NULL;
1466     mutex_unlock(&ctx->mapping_lock);
1467     return 0;
1468 }
1469 
/*
 * interrupt-level mfc callback function.
 * Wakes everyone blocked on the context's MFC wait queue (readers,
 * writers, pollers) when a tag-group completion interrupt arrives.
 * The spu may have no context attached, hence the NULL check.
 */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->mfc_wq);
}
1478 
/*
 * Wait-condition helper for blocking MFC reads: returns 1 (and fills
 * *status with the completed mask) when any awaited tag group is done,
 * otherwise re-arms the completion interrupt and returns 0 so the
 * caller keeps waiting.  Completed groups are removed from tagwait.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1493 
1494 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1495             size_t size, loff_t *pos)
1496 {
1497     struct spu_context *ctx = file->private_data;
1498     int ret = -EINVAL;
1499     u32 status;
1500 
1501     if (size != 4)
1502         goto out;
1503 
1504     ret = spu_acquire(ctx);
1505     if (ret)
1506         return ret;
1507 
1508     ret = -EINVAL;
1509     if (file->f_flags & O_NONBLOCK) {
1510         status = ctx->ops->read_mfc_tagstatus(ctx);
1511         if (!(status & ctx->tagwait))
1512             ret = -EAGAIN;
1513         else
1514             /* XXX(hch): shouldn't we clear ret here? */
1515             ctx->tagwait &= ~status;
1516     } else {
1517         ret = spufs_wait(ctx->mfc_wq,
1518                spufs_read_mfc_tagstatus(ctx, &status));
1519         if (ret)
1520             goto out;
1521     }
1522     spu_release(ctx);
1523 
1524     ret = 4;
1525     if (copy_to_user(buffer, &status, 4))
1526         ret = -EFAULT;
1527 
1528 out:
1529     return ret;
1530 }
1531 
/*
 * Validate a user-supplied MFC DMA command before queueing it:
 * opcode must be a plain put/get variant, local-store and effective
 * addresses must agree in their low four bits, the transfer size must
 * be 1/2/4/8 bytes or a multiple of 16 with matching alignment and at
 * most 16k, the tag must be in the user range (0-15), and no class ID
 * may be set.  Returns 0 if valid, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/* the low nibble of size selects the required lsa alignment;
	 * the error: label is jumped to from the failing cases */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
1601 
/*
 * Wait-condition helper for blocking MFC writes.  Tries to queue the
 * command; on a full queue (-EAGAIN) it arms the completion interrupt
 * and retries once.  Returns 1 when done waiting (*error holds the
 * final send result), 0 to keep waiting for queue space.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1619 
/*
 * Queue one MFC DMA command written as a struct mfc_dma_command.
 * The command is validated, the context is made runnable, and the
 * command is sent (immediately for O_NONBLOCK, else waiting for queue
 * space).  On success the command's tag is added to ctx->tagwait and
 * the full struct size is returned.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	/* NOTE(review): on failure spufs_wait() appears to drop the
	 * context itself, hence "goto out" rather than out_unlock here */
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	/* remember the tag so reads/polls can wait for its completion */
	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
1669 
/*
 * poll() for "mfc": writable when the command queue has free slots,
 * readable when any awaited tag group has completed.
 */
static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	__poll_t mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	/* mode 2: arm the query for poll-style completion notification */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= EPOLLIN | EPOLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1699 
/*
 * flush() for "mfc".  The intended behaviour — wait for all awaited
 * tag groups to complete — is disabled (#if 0) because it hangs; the
 * current implementation only acquires and releases the context.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}
1725 
1726 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1727 {
1728     struct inode *inode = file_inode(file);
1729     int err = file_write_and_wait_range(file, start, end);
1730     if (!err) {
1731         inode_lock(inode);
1732         err = spufs_mfc_flush(file, NULL);
1733         inode_unlock(inode);
1734     }
1735     return err;
1736 }
1737 
/* "mfc" file: DMA command submission, status readback, poll, mmap. */
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
1749 
/* "npc" attribute: SPU next program counter, accessed via context ops. */
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Getter runs under spu_acquire() courtesy of DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE)
1770 
/* "decr" attribute: SPU decrementer value in the saved local store CSA. */
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Getter runs with the context saved, via DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1793 
/*
 * "decr_status" attribute: whether the decrementer is running, kept as
 * the MFC_CNTL_DECREMENTER_RUNNING bit in the saved mfc_control_RW.
 */
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

/* Reports SPU_DECR_STATUS_RUNNING or 0. */
static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED)
1821 
/* "event_mask" attribute: SPU event mask slot in the saved local store. */
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Getter runs with the context saved, via DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED)
1846 
/*
 * "event_status" attribute (read-only): pending event data from saved
 * channel 0, or 0 when the channel count shows nothing pending.
 */
static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1858 
/* "srr0" attribute: interrupt return address slot in the saved local store. */
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Getter runs with the context saved, via DEFINE_SPUFS_ATTRIBUTE. */
static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1881 
/*
 * "phys-id" style attribute (read-only): the physical SPU number while
 * the context is runnable, or ~0 (as unsigned int) when it has no SPU.
 */
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
1895 
/*
 * "object-id" attribute: an opaque user-set cookie stored on the
 * context, accessed without taking the context (SPU_ATTR_NOACQUIRE).
 */
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1912 
/* "lslr" attribute (read-only): local store limit register from saved state. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1919 
1920 static int spufs_info_open(struct inode *inode, struct file *file)
1921 {
1922     struct spufs_inode_info *i = SPUFS_I(inode);
1923     struct spu_context *ctx = i->i_ctx;
1924     file->private_data = ctx;
1925     return 0;
1926 }
1927 
1928 static int spufs_caps_show(struct seq_file *s, void *private)
1929 {
1930     struct spu_context *ctx = s->private;
1931 
1932     if (!(ctx->flags & SPU_CREATE_NOSCHED))
1933         seq_puts(s, "sched\n");
1934     if (!(ctx->flags & SPU_CREATE_ISOLATE))
1935         seq_puts(s, "step\n");
1936     return 0;
1937 }
1938 
/* open() for "capabilities": standard single-record seq_file setup. */
static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}
1943 
/* "capabilities" file: read-only seq_file. */
static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1950 
/*
 * Core-dump helper: emit the saved PU mailbox value, but only when the
 * low byte of mb_stat_R shows an entry is present.
 */
static ssize_t spufs_mbox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,
			       sizeof(ctx->csa.prob.pu_mb_R));
}
1959 
/*
 * read() for "mbox_info": snapshot mailbox status and data from the
 * saved context under register_lock, then serve the value (or EOF when
 * the status low byte shows the mailbox is empty).
 */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 stat, data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.prob.pu_mb_R;
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	/* EOF if there's no entry in the mbox */
	if (!(stat & 0x0000ff))
		return 0;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
1982 
/* "mbox_info" file: read-only snapshot of the saved PU mailbox. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
1988 
/*
 * Core-dump helper: emit the saved interrupt mailbox value, but only
 * when bits 16-23 of mb_stat_R show an entry is present.
 */
static ssize_t spufs_ibox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,
			       sizeof(ctx->csa.priv2.puint_mb_R));
}
1997 
/*
 * read() for "ibox_info": snapshot interrupt-mailbox status and data
 * from the saved context under register_lock, then serve the value (or
 * EOF when the status bits show the ibox is empty).
 */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 stat, data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.priv2.puint_mb_R;
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	/* EOF if there's no entry in the ibox */
	if (!(stat & 0xff0000))
		return 0;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
2020 
/* "ibox_info" file: read-only snapshot of the saved interrupt mailbox. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
2026 
/*
 * Bytes of valid SPU write-mailbox data: bits 8-15 of mb_stat_R hold
 * the free-slot count out of 4, so (4 - free) entries of u32 are valid.
 */
static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
{
	return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
}
2031 
/* Core-dump helper: emit the valid portion of the saved write mailbox. */
static ssize_t spufs_wbox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,
			spufs_wbox_info_cnt(ctx));
}
2038 
/*
 * read() for "wbox_info": copy the saved write-mailbox entries into a
 * local buffer under register_lock, then serve only the valid count.
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
	int ret, count;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	count = spufs_wbox_info_cnt(ctx);
	memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &data,
				count * sizeof(u32));
}
2058 
/* "wbox_info" file: read-only snapshot of the saved write mailbox. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
2064 
/*
 * Fill a struct spu_dma_info from the saved context: tag query state,
 * tag mask, DMA-related channel data (channels 24, 25, 27) and the 16
 * SPU command-queue entries.  Caller must hold the context saved.
 */
static void spufs_get_dma_info(struct spu_context *ctx,
		struct spu_dma_info *info)
{
	int i;

	info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
		struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}
}
2085 
/* Core-dump helper: emit the DMA info snapshot for the saved context. */
static ssize_t spufs_dma_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	struct spu_dma_info info;

	spufs_get_dma_info(ctx, &info);
	return spufs_dump_emit(cprm, &info, sizeof(info));
}
2094 
2095 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2096                   size_t len, loff_t *pos)
2097 {
2098     struct spu_context *ctx = file->private_data;
2099     struct spu_dma_info info;
2100     int ret;
2101 
2102     ret = spu_acquire_saved(ctx);
2103     if (ret)
2104         return ret;
2105     spin_lock(&ctx->csa.register_lock);
2106     spufs_get_dma_info(ctx, &info);
2107     spin_unlock(&ctx->csa.register_lock);
2108     spu_release_saved(ctx);
2109 
2110     return simple_read_from_buffer(buf, len, pos, &info,
2111                 sizeof(info));
2112 }
2113 
/* "dma_info" file: fixed-size binary snapshot; seeking not supported. */
static const struct file_operations spufs_dma_info_fops = {
    .open = spufs_info_open,
    .read = spufs_dma_info_read,
    .llseek = no_llseek,
};
2119 
2120 static void spufs_get_proxydma_info(struct spu_context *ctx,
2121         struct spu_proxydma_info *info)
2122 {
2123     int i;
2124 
2125     info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2126     info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2127     info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2128 
2129     for (i = 0; i < 8; i++) {
2130         struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
2131         struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
2132 
2133         qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2134         qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2135         qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2136         qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2137     }
2138 }
2139 
2140 static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx,
2141         struct coredump_params *cprm)
2142 {
2143     struct spu_proxydma_info info;
2144 
2145     spufs_get_proxydma_info(ctx, &info);
2146     return spufs_dump_emit(cprm, &info, sizeof(info));
2147 }
2148 
2149 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2150                    size_t len, loff_t *pos)
2151 {
2152     struct spu_context *ctx = file->private_data;
2153     struct spu_proxydma_info info;
2154     int ret;
2155 
2156     if (len < sizeof(info))
2157         return -EINVAL;
2158 
2159     ret = spu_acquire_saved(ctx);
2160     if (ret)
2161         return ret;
2162     spin_lock(&ctx->csa.register_lock);
2163     spufs_get_proxydma_info(ctx, &info);
2164     spin_unlock(&ctx->csa.register_lock);
2165     spu_release_saved(ctx);
2166 
2167     return simple_read_from_buffer(buf, len, pos, &info,
2168                 sizeof(info));
2169 }
2170 
/* "proxydma_info" file: fixed-size binary snapshot; seeking not supported. */
static const struct file_operations spufs_proxydma_info_fops = {
    .open = spufs_info_open,
    .read = spufs_proxydma_info_read,
    .llseek = no_llseek,
};
2176 
/* Emit the context's recorded thread id (ctx->tid) for the "tid" file. */
static int spufs_show_tid(struct seq_file *s, void *private)
{
    struct spu_context *ctx = s->private;

    seq_printf(s, "%d\n", ctx->tid);
    return 0;
}
2184 
/* Bind the single-record seq_file show to this inode's spu context. */
static int spufs_tid_open(struct inode *inode, struct file *file)
{
    return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}
2189 
/* "tid" file: standard single-open seq_file plumbing. */
static const struct file_operations spufs_tid_fops = {
    .open       = spufs_tid_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = single_release,
};
2196 
/* Human-readable names, indexed by enum spu_utilization_state
 * (used as the first field of the "stat" file output). */
static const char *ctx_state_names[] = {
    "user", "system", "iowait", "loaded"
};
2200 
2201 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2202         enum spu_utilization_state state)
2203 {
2204     unsigned long long time = ctx->stats.times[state];
2205 
2206     /*
2207      * In general, utilization statistics are updated by the controlling
2208      * thread as the spu context moves through various well defined
2209      * state transitions, but if the context is lazily loaded its
2210      * utilization statistics are not updated as the controlling thread
2211      * is not tightly coupled with the execution of the spu context.  We
2212      * calculate and apply the time delta from the last recorded state
2213      * of the spu context.
2214      */
2215     if (ctx->spu && ctx->stats.util_state == state) {
2216         time += ktime_get_ns() - ctx->stats.tstamp;
2217     }
2218 
2219     return time / NSEC_PER_MSEC;
2220 }
2221 
2222 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2223 {
2224     unsigned long long slb_flts = ctx->stats.slb_flt;
2225 
2226     if (ctx->state == SPU_STATE_RUNNABLE) {
2227         slb_flts += (ctx->spu->stats.slb_flt -
2228                  ctx->stats.slb_flt_base);
2229     }
2230 
2231     return slb_flts;
2232 }
2233 
2234 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2235 {
2236     unsigned long long class2_intrs = ctx->stats.class2_intr;
2237 
2238     if (ctx->state == SPU_STATE_RUNNABLE) {
2239         class2_intrs += (ctx->spu->stats.class2_intr -
2240                  ctx->stats.class2_intr_base);
2241     }
2242 
2243     return class2_intrs;
2244 }
2245 
2246 
/*
 * Emit one line of per-context utilization statistics for the "stat"
 * file: current state name, accounted times for the four utilization
 * states, context-switch counts, fault counts, class 2 interrupts and
 * the libassist counter.  The context is held across the snapshot so
 * the fields are read consistently.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
    struct spu_context *ctx = s->private;
    int ret;

    ret = spu_acquire(ctx);
    if (ret)
        return ret;

    seq_printf(s, "%s %llu %llu %llu %llu "
              "%llu %llu %llu %llu %llu %llu %llu %llu\n",
        ctx_state_names[ctx->stats.util_state],
        spufs_acct_time(ctx, SPU_UTIL_USER),
        spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
        spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
        spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
        ctx->stats.vol_ctx_switch,
        ctx->stats.invol_ctx_switch,
        spufs_slb_flts(ctx),
        ctx->stats.hash_flt,
        ctx->stats.min_flt,
        ctx->stats.maj_flt,
        spufs_class2_intrs(ctx),
        ctx->stats.libassist);
    spu_release(ctx);
    return 0;
}
2274 
/* Bind the single-record seq_file show to this inode's spu context. */
static int spufs_stat_open(struct inode *inode, struct file *file)
{
    return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}
2279 
/* "stat" file: standard single-open seq_file plumbing. */
static const struct file_operations spufs_stat_fops = {
    .open       = spufs_stat_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = single_release,
};
2286 
/* Number of unread entries in the switch log ring buffer.
 * NOTE(review): relies on the head - tail subtraction wrapping to a
 * non-negative value — confirm head/tail are unsigned in spufs.h. */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
    return (ctx->switch_log->head - ctx->switch_log->tail) %
        SWITCH_LOG_BUFSIZE;
}
2292 
/* Free slots remaining in the switch log ring buffer. */
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
    return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
2297 
2298 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2299 {
2300     struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2301     int rc;
2302 
2303     rc = spu_acquire(ctx);
2304     if (rc)
2305         return rc;
2306 
2307     if (ctx->switch_log) {
2308         rc = -EBUSY;
2309         goto out;
2310     }
2311 
2312     ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,
2313                   SWITCH_LOG_BUFSIZE), GFP_KERNEL);
2314 
2315     if (!ctx->switch_log) {
2316         rc = -ENOMEM;
2317         goto out;
2318     }
2319 
2320     ctx->switch_log->head = ctx->switch_log->tail = 0;
2321     init_waitqueue_head(&ctx->switch_log->wait);
2322     rc = 0;
2323 
2324 out:
2325     spu_release(ctx);
2326     return rc;
2327 }
2328 
2329 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2330 {
2331     struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2332     int rc;
2333 
2334     rc = spu_acquire(ctx);
2335     if (rc)
2336         return rc;
2337 
2338     kfree(ctx->switch_log);
2339     ctx->switch_log = NULL;
2340     spu_release(ctx);
2341 
2342     return 0;
2343 }
2344 
2345 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2346 {
2347     struct switch_log_entry *p;
2348 
2349     p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2350 
2351     return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
2352             (unsigned long long) p->tstamp.tv_sec,
2353             (unsigned int) p->tstamp.tv_nsec,
2354             p->spu_id,
2355             (unsigned int) p->type,
2356             (unsigned int) p->val,
2357             (unsigned long long) p->timebase);
2358 }
2359 
2360 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2361                  size_t len, loff_t *ppos)
2362 {
2363     struct inode *inode = file_inode(file);
2364     struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2365     int error = 0, cnt = 0;
2366 
2367     if (!buf)
2368         return -EINVAL;
2369 
2370     error = spu_acquire(ctx);
2371     if (error)
2372         return error;
2373 
2374     while (cnt < len) {
2375         char tbuf[128];
2376         int width;
2377 
2378         if (spufs_switch_log_used(ctx) == 0) {
2379             if (cnt > 0) {
2380                 /* If there's data ready to go, we can
2381                  * just return straight away */
2382                 break;
2383 
2384             } else if (file->f_flags & O_NONBLOCK) {
2385                 error = -EAGAIN;
2386                 break;
2387 
2388             } else {
2389                 /* spufs_wait will drop the mutex and
2390                  * re-acquire, but since we're in read(), the
2391                  * file cannot be _released (and so
2392                  * ctx->switch_log is stable).
2393                  */
2394                 error = spufs_wait(ctx->switch_log->wait,
2395                         spufs_switch_log_used(ctx) > 0);
2396 
2397                 /* On error, spufs_wait returns without the
2398                  * state mutex held */
2399                 if (error)
2400                     return error;
2401 
2402                 /* We may have had entries read from underneath
2403                  * us while we dropped the mutex in spufs_wait,
2404                  * so re-check */
2405                 if (spufs_switch_log_used(ctx) == 0)
2406                     continue;
2407             }
2408         }
2409 
2410         width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2411         if (width < len)
2412             ctx->switch_log->tail =
2413                 (ctx->switch_log->tail + 1) %
2414                  SWITCH_LOG_BUFSIZE;
2415         else
2416             /* If the record is greater than space available return
2417              * partial buffer (so far) */
2418             break;
2419 
2420         error = copy_to_user(buf + cnt, tbuf, width);
2421         if (error)
2422             break;
2423         cnt += width;
2424     }
2425 
2426     spu_release(ctx);
2427 
2428     return cnt == 0 ? error : cnt;
2429 }
2430 
2431 static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
2432 {
2433     struct inode *inode = file_inode(file);
2434     struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2435     __poll_t mask = 0;
2436     int rc;
2437 
2438     poll_wait(file, &ctx->switch_log->wait, wait);
2439 
2440     rc = spu_acquire(ctx);
2441     if (rc)
2442         return rc;
2443 
2444     if (spufs_switch_log_used(ctx) > 0)
2445         mask |= EPOLLIN;
2446 
2447     spu_release(ctx);
2448 
2449     return mask;
2450 }
2451 
/* "switch_log" file: blocking, pollable stream of switch records. */
static const struct file_operations spufs_switch_log_fops = {
    .open       = spufs_switch_log_open,
    .read       = spufs_switch_log_read,
    .poll       = spufs_switch_log_poll,
    .release    = spufs_switch_log_release,
    .llseek     = no_llseek,
};
2459 
2460 /**
2461  * Log a context switch event to a switch log reader.
2462  *
2463  * Must be called with ctx->state_mutex held.
2464  */
2465 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2466         u32 type, u32 val)
2467 {
2468     if (!ctx->switch_log)
2469         return;
2470 
2471     if (spufs_switch_log_avail(ctx) > 1) {
2472         struct switch_log_entry *p;
2473 
2474         p = ctx->switch_log->log + ctx->switch_log->head;
2475         ktime_get_ts64(&p->tstamp);
2476         p->timebase = get_tb();
2477         p->spu_id = spu ? spu->number : -1;
2478         p->type = type;
2479         p->val = val;
2480 
2481         ctx->switch_log->head =
2482             (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2483     }
2484 
2485     wake_up(&ctx->switch_log->wait);
2486 }
2487 
/*
 * Debug view (".ctx" file): one-line summary of the context's state,
 * flags, scheduling parameters and selected MFC/status registers.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
    struct spu_context *ctx = s->private;
    u64 mfc_control_RW;

    mutex_lock(&ctx->state_mutex);
    if (ctx->spu) {
        /* Context is loaded: read MFC control from the hardware,
         * under the per-SPU register lock. */
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        spin_lock_irq(&spu->register_lock);
        mfc_control_RW = in_be64(&priv2->mfc_control_RW);
        spin_unlock_irq(&spu->register_lock);
    } else {
        /* Context is saved: use the software-saved copy. */
        struct spu_state *csa = &ctx->csa;

        mfc_control_RW = csa->priv2.mfc_control_RW;
    }

    seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
        " %c %llx %llx %llx %llx %x %x\n",
        ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
        ctx->flags,
        ctx->sched_flags,
        ctx->prio,
        ctx->time_slice,
        ctx->spu ? ctx->spu->number : -1,
        !list_empty(&ctx->rq) ? 'q' : ' ',
        ctx->csa.class_0_pending,
        ctx->csa.class_0_dar,
        ctx->csa.class_1_dsisr,
        mfc_control_RW,
        ctx->ops->runcntl_read(ctx),
        ctx->ops->status_read(ctx));

    mutex_unlock(&ctx->state_mutex);

    return 0;
}
2527 
/* Bind the single-record seq_file show to this inode's spu context. */
static int spufs_ctx_open(struct inode *inode, struct file *file)
{
    return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}
2532 
/* ".ctx" debug file: standard single-open seq_file plumbing. */
static const struct file_operations spufs_ctx_fops = {
    .open           = spufs_ctx_open,
    .read           = seq_read,
    .llseek         = seq_lseek,
    .release        = single_release,
};
2539 
/*
 * Files created in each context directory for regular (schedulable)
 * contexts.  Each entry: name, fops, mode, and optionally the file's
 * nominal size.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
    { "capabilities", &spufs_caps_fops, 0444, },
    { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
    { "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
    { "mbox", &spufs_mbox_fops, 0444, },
    { "ibox", &spufs_ibox_fops, 0444, },
    { "wbox", &spufs_wbox_fops, 0222, },
    { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
    { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
    { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
    { "signal1", &spufs_signal1_fops, 0666, },
    { "signal2", &spufs_signal2_fops, 0666, },
    { "signal1_type", &spufs_signal1_type, 0666, },
    { "signal2_type", &spufs_signal2_type, 0666, },
    { "cntl", &spufs_cntl_fops,  0666, },
    { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
    { "lslr", &spufs_lslr_ops, 0444, },
    { "mfc", &spufs_mfc_fops, 0666, },
    { "mss", &spufs_mss_fops, 0666, },
    { "npc", &spufs_npc_ops, 0666, },
    { "srr0", &spufs_srr0_ops, 0666, },
    { "decr", &spufs_decr_ops, 0666, },
    { "decr_status", &spufs_decr_status_ops, 0666, },
    { "event_mask", &spufs_event_mask_ops, 0666, },
    { "event_status", &spufs_event_status_ops, 0444, },
    { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
    { "phys-id", &spufs_id_ops, 0666, },
    { "object-id", &spufs_object_id_ops, 0666, },
    { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
    { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
    { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
    { "dma_info", &spufs_dma_info_fops, 0444,
        sizeof(struct spu_dma_info), },
    { "proxydma_info", &spufs_proxydma_info_fops, 0444,
        sizeof(struct spu_proxydma_info)},
    { "tid", &spufs_tid_fops, 0444, },
    { "stat", &spufs_stat_fops, 0444, },
    { "switch_log", &spufs_switch_log_fops, 0444 },
    {},
};
2580 
/*
 * Reduced file set for non-scheduled (NOSCHED) contexts; note the
 * signal files use the nosched fops variants and are write-only.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
    { "capabilities", &spufs_caps_fops, 0444, },
    { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
    { "mbox", &spufs_mbox_fops, 0444, },
    { "ibox", &spufs_ibox_fops, 0444, },
    { "wbox", &spufs_wbox_fops, 0222, },
    { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
    { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
    { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
    { "signal1", &spufs_signal1_nosched_fops, 0222, },
    { "signal2", &spufs_signal2_nosched_fops, 0222, },
    { "signal1_type", &spufs_signal1_type, 0666, },
    { "signal2_type", &spufs_signal2_type, 0666, },
    { "mss", &spufs_mss_fops, 0666, },
    { "mfc", &spufs_mfc_fops, 0666, },
    { "cntl", &spufs_cntl_fops,  0666, },
    { "npc", &spufs_npc_ops, 0666, },
    { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
    { "phys-id", &spufs_id_ops, 0666, },
    { "object-id", &spufs_object_id_ops, 0666, },
    { "tid", &spufs_tid_fops, 0444, },
    { "stat", &spufs_stat_fops, 0444, },
    {},
};
2605 
/* Debug-only entries added to a context directory. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
    { ".ctx", &spufs_ctx_fops, 0444, },
    {},
};
2610 
/*
 * Per-file readers used when dumping a SPU context into a coredump.
 * Each entry supplies either a dump callback or a u64 getter, plus the
 * note size.  NOTE(review): the magic size 19 presumably covers a
 * "0x"-prefixed 16-digit hex value plus newline — confirm against the
 * coredump formatting code in coredump.c.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
    { "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])},
    { "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) },
    { "lslr", NULL, spufs_lslr_get, 19 },
    { "decr", NULL, spufs_decr_get, 19 },
    { "decr_status", NULL, spufs_decr_status_get, 19 },
    { "mem", spufs_mem_dump, NULL, LS_SIZE, },
    { "signal1", spufs_signal1_dump, NULL, sizeof(u32) },
    { "signal1_type", NULL, spufs_signal1_type_get, 19 },
    { "signal2", spufs_signal2_dump, NULL, sizeof(u32) },
    { "signal2_type", NULL, spufs_signal2_type_get, 19 },
    { "event_mask", NULL, spufs_event_mask_get, 19 },
    { "event_status", NULL, spufs_event_status_get, 19 },
    { "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) },
    { "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) },
    { "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)},
    { "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)},
    { "proxydma_info", spufs_proxydma_info_dump,
               NULL, sizeof(struct spu_proxydma_info)},
    { "object-id", NULL, spufs_object_id_get, 19 },
    { "npc", NULL, spufs_npc_get, 19 },
    { NULL },
};