// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
    struct spu_context *ctx = spu->ctx;

    /*
     * It should be impossible to preempt a context while an exception
     * is being processed, since the context switch code is specially
     * coded to deal with interrupts ... But, just in case, sanity check
     * the context pointer.  It is OK to return doing nothing since
     * the exception will be regenerated when the context is resumed.
     */
    if (ctx) {
        /* Copy exception arguments into module specific structure */
        switch (irq) {
        case 0:
            ctx->csa.class_0_pending = spu->class_0_pending;
            ctx->csa.class_0_dar = spu->class_0_dar;
            break;
        case 1:
            ctx->csa.class_1_dsisr = spu->class_1_dsisr;
            ctx->csa.class_1_dar = spu->class_1_dar;
            break;
        case 2:
            break;
        }

        /* ensure that the exception status has hit memory before a
         * thread waiting on the context's stop queue is woken */
        smp_wmb();

        wake_up_all(&ctx->stop_wq);
    }
}

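/*
 * Check whether the SPU has come to rest.  Returns nonzero when the
 * status register shows a stop condition (and the SPU is no longer
 * running), when a scheduler notification is pending, or when a
 * class 0/1 exception recorded by spufs_stop_callback() still needs
 * handling; *stat receives the last status value read.
 */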
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
    u64 dsisr;
    u32 stopped;

    stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
        SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
    *stat = ctx->ops->status_read(ctx);
    if (*stat & stopped) {
        /*
         * If the spu hasn't finished stopping, we need to
         * re-read the register to get the stopped value.
         */
        if (*stat & SPU_STATUS_RUNNING)
            goto top;
        return 1;
    }

    if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
        return 1;

    dsisr = ctx->csa.class_1_dsisr;
    if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
        return 1;

    if (ctx->csa.class_0_pending)
        return 1;

    return 0;
}

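/*
 * Prepare an SPE for isolated-mode execution: quiesce the MFC, drop
 * into privileged (kernel) mode so the isolated loader is accessible,
 * point the signal notification registers at the loader and start it,
 * then wait for the load to complete before returning to problem state.
 */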
static int spu_setup_isolated(struct spu_context *ctx)
{
    int ret;
    u64 __iomem *mfc_cntl;
    u64 sr1;
    u32 status;
    unsigned long timeout;
    const u32 status_loading = SPU_STATUS_RUNNING
        | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

    ret = -ENODEV;
    if (!isolated_loader)
        goto out;

    /*
     * We need to exclude userspace access to the context.
     *
     * To protect against memory access we invalidate all ptes
     * and make sure the pagefault handlers block on the mutex.
     */
    spu_unmap_mappings(ctx);

    mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

    /* purge the MFC DMA queue to ensure no spurious accesses before we
     * enter kernel mode */
    timeout = jiffies + HZ;
    out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
    while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
            != MFC_CNTL_PURGE_DMA_COMPLETE) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                    __func__);
            ret = -EIO;
            goto out;
        }
        cond_resched();
    }

    /* clear purge status */
    out_be64(mfc_cntl, 0);

    /* put the SPE in kernel mode to allow access to the loader */
    sr1 = spu_mfc_sr1_get(ctx->spu);
    sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
    spu_mfc_sr1_set(ctx->spu, sr1);

    /* start the loader */
    ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
    ctx->ops->signal2_write(ctx,
            (unsigned long)isolated_loader & 0xffffffff);

    ctx->ops->runcntl_write(ctx,
            SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

    ret = 0;
    timeout = jiffies + HZ;
    while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                status_loading) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "%s: timeout waiting for loader\n",
                    __func__);
            ret = -EIO;
            goto out_drop_priv;
        }
        cond_resched();
    }

    if (!(status & SPU_STATUS_RUNNING)) {
        /* If isolated LOAD has failed: run SPU, we will get a stop-and
         * signal later. */
        pr_debug("%s: isolated LOAD failed\n", __func__);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        ret = -EACCES;
        goto out_drop_priv;
    }

    if (!(status & SPU_STATUS_ISOLATED_STATE)) {
        /* This isn't allowed by the CBEA, but check anyway */
        pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
        ret = -EINVAL;
        goto out_drop_priv;
    }

out_drop_priv:
    /* Finished accessing the loader. Drop kernel mode */
    sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
    spu_mfc_sr1_set(ctx->spu, sr1);

out:
    return ret;
}

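/*
 * Get the context ready to run: make sure it is loaded onto an SPU when
 * required, apply isolated-mode or single-step setup, and write the run
 * control register so the SPU starts executing.
 */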
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
    unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
    int ret;

    spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

    /*
     * NOSCHED is synchronous scheduling with respect to the caller.
     * The caller waits for the context to be loaded.
     */
    if (ctx->flags & SPU_CREATE_NOSCHED) {
        if (ctx->state == SPU_STATE_SAVED) {
            ret = spu_activate(ctx, 0);
            if (ret)
                return ret;
        }
    }

    /*
     * Apply special setup as required.
     */
    if (ctx->flags & SPU_CREATE_ISOLATE) {
        if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
            ret = spu_setup_isolated(ctx);
            if (ret)
                return ret;
        }

        /*
         * If userspace has set the runcntl register (e.g. to
         * issue an isolated exit), we need to re-set it here.
         */
        runcntl = ctx->ops->runcntl_read(ctx) &
            (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
        if (runcntl == 0)
            runcntl = SPU_RUNCNTL_RUNNABLE;
    } else {
        unsigned long privcntl;

        if (test_thread_flag(TIF_SINGLESTEP))
            privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
        else
            privcntl = SPU_PRIVCNTL_MODE_NORMAL;

        ctx->ops->privcntl_write(ctx, privcntl);
        ctx->ops->npc_write(ctx, *npc);
    }

    ctx->ops->runcntl_write(ctx, runcntl);

    if (ctx->flags & SPU_CREATE_NOSCHED) {
        spuctx_switch_state(ctx, SPU_UTIL_USER);
    } else {
        if (ctx->state == SPU_STATE_SAVED) {
            ret = spu_activate(ctx, 0);
            if (ret)
                return ret;
        } else {
            spuctx_switch_state(ctx, SPU_UTIL_USER);
        }
    }

    set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
    return 0;
}

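/*
 * Tear down after a run: record the final status and next program
 * counter, mark the context idle, notify any switch-log readers and
 * release the context.  Returns -ERESTARTSYS if a signal is pending.
 */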
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
               u32 *status)
{
    int ret = 0;

    spu_del_from_rq(ctx);

    *status = ctx->ops->status_read(ctx);
    *npc = ctx->ops->npc_read(ctx);

    spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
    clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
    spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
    spu_release(ctx);

    if (signal_pending(current))
        ret = -ERESTARTSYS;

    return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
              unsigned int *npc)
{
    int ret;

    switch (*spu_ret) {
    case -ERESTARTSYS:
    case -ERESTARTNOINTR:
        /*
         * Enter the regular syscall restarting for
         * sys_spu_run, then restart the SPU syscall
         * callback.
         */
        *npc -= 8;
        ret = -ERESTARTSYS;
        break;
    case -ERESTARTNOHAND:
    case -ERESTART_RESTARTBLOCK:
        /*
         * Restart block is too hard for now, just return -EINTR
         * to the SPU.
         * ERESTARTNOHAND comes from sys_pause, we also return
         * -EINTR from there.
         * Assume that we need to be restarted ourselves though.
         */
        *spu_ret = -EINTR;
        ret = -ERESTARTSYS;
        break;
    default:
        printk(KERN_WARNING "%s: unexpected return code %ld\n",
            __func__, *spu_ret);
        ret = 0;
    }
    return ret;
}

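/*
 * Handle a callback stop: the SPU program has placed a syscall block in
 * local store and stopped with the callback stop code.  Fetch the block,
 * run the system call on the PPE with the context released, write the
 * result back into local store and restart the SPU after the stop.
 */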
static int spu_process_callback(struct spu_context *ctx)
{
    struct spu_syscall_block s;
    u32 ls_pointer, npc;
    void __iomem *ls;
    long spu_ret;
    int ret;

    /* get syscall block from local store */
    npc = ctx->ops->npc_read(ctx) & ~3;
    ls = (void __iomem *)ctx->ops->get_ls(ctx);
    ls_pointer = in_be32(ls + npc);
    if (ls_pointer > (LS_SIZE - sizeof(s)))
        return -EFAULT;
    memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

    /* do actual syscall without pinning the spu */
    ret = 0;
    spu_ret = -ENOSYS;
    npc += 4;

    if (s.nr_ret < NR_syscalls) {
        spu_release(ctx);
        /* do actual system call from here */
        spu_ret = spu_sys_callback(&s);
        if (spu_ret <= -ERESTARTSYS) {
            ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
        }
        mutex_lock(&ctx->state_mutex);
        if (ret == -ERESTARTSYS)
            return ret;
    }

    /* need to re-get the ls, as it may have changed when we released the
     * spu */
    ls = (void __iomem *)ctx->ops->get_ls(ctx);

    /* write result, jump over indirect pointer */
    memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
    ctx->ops->npc_write(ctx, npc);
    ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
    return ret;
}

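/*
 * Back end of the spu_run system call: start the context and loop,
 * waiting for it to stop, handling callbacks and class 0/1 exceptions,
 * until the SPU program halts, single-steps or a signal interrupts us.
 */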
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
    int ret;
    u32 status;

    if (mutex_lock_interruptible(&ctx->run_mutex))
        return -ERESTARTSYS;

    ctx->event_return = 0;

    ret = spu_acquire(ctx);
    if (ret)
        goto out_unlock;

    spu_enable_spu(ctx);

    spu_update_sched_info(ctx);

    ret = spu_run_init(ctx, npc);
    if (ret) {
        spu_release(ctx);
        goto out;
    }

    do {
        ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
        if (unlikely(ret)) {
            /*
             * This is nasty: we need the state_mutex for all the
             * bookkeeping even if the syscall was interrupted by
             * a signal. ewww.
             */
            mutex_lock(&ctx->state_mutex);
            break;
        }
        if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
                        &ctx->sched_flags))) {
            if (!(status & SPU_STATUS_STOPPED_BY_STOP))
                continue;
        }

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

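        /* Stop code 0x2104 is the spufs convention for requesting a
         * PPE-assisted system call; dispatch it and clear the stop bit
         * so the loop keeps running afterwards. */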
        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
            ret = spu_process_callback(ctx);
            if (ret)
                break;
            status &= ~SPU_STATUS_STOPPED_BY_STOP;
        }
        ret = spufs_handle_class1(ctx);
        if (ret)
            break;

        ret = spufs_handle_class0(ctx);
        if (ret)
            break;

        if (signal_pending(current))
            ret = -ERESTARTSYS;
    } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                      SPU_STATUS_STOPPED_BY_HALT |
                      SPU_STATUS_SINGLE_STEP)));

    spu_disable_spu(ctx);
    ret = spu_run_fini(ctx, npc, &status);
    spu_yield(ctx);

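    /* Stop codes in the 0x21xx range are library-assisted calls made by
     * the SPU program; count them for the context statistics. */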
    if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
        (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
        ctx->stats.libassist++;

    if ((ret == 0) ||
        ((ret == -ERESTARTSYS) &&
         ((status & SPU_STATUS_STOPPED_BY_HALT) ||
          (status & SPU_STATUS_SINGLE_STEP) ||
          ((status & SPU_STATUS_STOPPED_BY_STOP) &&
           (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
        ret = status;

    /* Note: we don't need to force_sig SIGTRAP on single-step
     * since we have TIF_SINGLESTEP set, thus the kernel will do
     * it upon return from the syscall anyway.
     */
    if (unlikely(status & SPU_STATUS_SINGLE_STEP))
        ret = -ERESTARTSYS;

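    /* Stop code 0x3fff is treated as a breakpoint: deliver SIGTRAP and
     * let the syscall be restarted. */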
    else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
        && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
        force_sig(SIGTRAP);
        ret = -ERESTARTSYS;
    }

out:
    *event = ctx->event_return;
out_unlock:
    mutex_unlock(&ctx->run_mutex);
    return ret;
}