// SPDX-License-Identifier: GPL-2.0-or-later
/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 */
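
/*
 * A minimal usage sketch (illustrative, not part of this file): SPUFS
 * reaches these operations through ctx->ops, which points at
 * spu_backing_ops while a context is saved and at the hardware ops
 * table while it is loaded on a physical SPU, so callers need not
 * care which backend is active:
 *
 *     u32 npc = ctx->ops->npc_read(ctx);
 */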

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. generate SPU events, modify channel
 * counts, etc.
 */
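
/*
 * Record an SPU event in the saved context: the event bit is or'ed
 * into channel 0 (event status) and the channel count is raised to
 * one only when it was zero, the event was not already pending, and
 * the event is enabled in channel 1 (event mask). The channel roles
 * follow the usual CBEA assignment and are inferred from the logic
 * below.
 */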
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
    u64 ch0_cnt;
    u64 ch0_data;
    u64 ch1_data;

    ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
    ch0_data = ctx->csa.spu_chnldata_RW[0];
    ch1_data = ctx->csa.spu_chnldata_RW[1];
    ctx->csa.spu_chnldata_RW[0] |= event;
    if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
        ctx->csa.spu_chnlcnt_RW[0] = 1;
    }
}

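/*
 * Layout of the saved mb_stat_R word, as used by the mailbox helpers
 * below (a reading of the masks in this file, not an authoritative
 * hardware description):
 *
 *   bits  0- 7 (0x0000ff): words available in the PU mailbox (pu_mb_R)
 *   bits  8-15 (0x00ff00): free slots in the SPU mailbox (spu_mb_W)
 *   bits 16-23 (0xff0000): words in the interrupting mailbox (puint_mb_R)
 */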
static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
    u32 mbox_stat;
    int ret = 0;

    spin_lock(&ctx->csa.register_lock);
    mbox_stat = ctx->csa.prob.mb_stat_R;
    if (mbox_stat & 0x0000ff) {
        /* Read the first available word.
         * Implementation note: the depth
         * of pu_mb_R is currently 1.
         */
        *data = ctx->csa.prob.pu_mb_R;
        ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
        ctx->csa.spu_chnlcnt_RW[28] = 1;
        gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
        ret = 4;
    }
    spin_unlock(&ctx->csa.register_lock);
    return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
    return ctx->csa.prob.mb_stat_R;
}

static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
                                           __poll_t events)
{
    __poll_t ret;
    u32 stat;

    ret = 0;
    spin_lock_irq(&ctx->csa.register_lock);
    stat = ctx->csa.prob.mb_stat_R;

    /* if the requested event is there, return the poll
       mask, otherwise enable the interrupt to get notified,
       but first mark any pending interrupts as done so
       we don't get woken up unnecessarily */

    if (events & (EPOLLIN | EPOLLRDNORM)) {
        if (stat & 0xff0000)
            ret |= EPOLLIN | EPOLLRDNORM;
        else {
            ctx->csa.priv1.int_stat_class2_RW &=
                ~CLASS2_MAILBOX_INTR;
            ctx->csa.priv1.int_mask_class2_RW |=
                CLASS2_ENABLE_MAILBOX_INTR;
        }
    }
    if (events & (EPOLLOUT | EPOLLWRNORM)) {
        if (stat & 0x00ff00)
            ret |= EPOLLOUT | EPOLLWRNORM;
        else {
            ctx->csa.priv1.int_stat_class2_RW &=
                ~CLASS2_MAILBOX_THRESHOLD_INTR;
            ctx->csa.priv1.int_mask_class2_RW |=
                CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
        }
    }
    spin_unlock_irq(&ctx->csa.register_lock);
    return ret;
}

static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
    int ret;

    spin_lock(&ctx->csa.register_lock);
    if (ctx->csa.prob.mb_stat_R & 0xff0000) {
        /* Read the first available word.
         * Implementation note: the depth
         * of puint_mb_R is currently 1.
         */
        *data = ctx->csa.priv2.puint_mb_R;
        ctx->csa.prob.mb_stat_R &= ~(0xff0000);
        ctx->csa.spu_chnlcnt_RW[30] = 1;
        gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
        ret = 4;
    } else {
        /* make sure we get woken up by the interrupt */
        ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
        ret = 0;
    }
    spin_unlock(&ctx->csa.register_lock);
    return ret;
}

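/*
 * Bookkeeping for saved wbox writes, as implemented below:
 * spu_chnlcnt_RW[29] holds the number of words already queued, so it
 * doubles as the next free slot index in spu_mailbox_data[], while
 * mb_stat_R bits 8-15 carry the matching free-slot count (4 - slot);
 * the BUG_ON cross-checks the two.
 */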
static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
    int ret;

    spin_lock(&ctx->csa.register_lock);
    if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
        int slot = ctx->csa.spu_chnlcnt_RW[29];
        int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

        /* We have space to write wbox_data.
         * Implementation note: the depth
         * of spu_mb_W is currently 4.
         */
        BUG_ON(avail != (4 - slot));
        ctx->csa.spu_mailbox_data[slot] = data;
        ctx->csa.spu_chnlcnt_RW[29] = ++slot;
        ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
        ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
        gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
        ret = 4;
    } else {
        /* make sure we get woken up by the interrupt when space
           becomes available */
        ctx->csa.priv1.int_mask_class2_RW |=
            CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
        ret = 0;
    }
    spin_unlock(&ctx->csa.register_lock);
    return ret;
}

static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
    return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
    spin_lock(&ctx->csa.register_lock);
    if (ctx->csa.priv2.spu_cfg_RW & 0x1)
        ctx->csa.spu_chnldata_RW[3] |= data;
    else
        ctx->csa.spu_chnldata_RW[3] = data;
    ctx->csa.spu_chnlcnt_RW[3] = 1;
    gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
    spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
    return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
    spin_lock(&ctx->csa.register_lock);
    if (ctx->csa.priv2.spu_cfg_RW & 0x2)
        ctx->csa.spu_chnldata_RW[4] |= data;
    else
        ctx->csa.spu_chnldata_RW[4] = data;
    ctx->csa.spu_chnlcnt_RW[4] = 1;
    gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
    spin_unlock(&ctx->csa.register_lock);
}

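/*
 * spu_cfg_RW bit 0 selects logical-OR mode for signal notification 1
 * and bit 1 does the same for signal notification 2; the signal write
 * handlers above check these bits when deciding whether to merge or
 * overwrite incoming data.
 */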
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
    u64 tmp;

    spin_lock(&ctx->csa.register_lock);
    tmp = ctx->csa.priv2.spu_cfg_RW;
    if (val)
        tmp |= 1;
    else
        tmp &= ~1;
    ctx->csa.priv2.spu_cfg_RW = tmp;
    spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
    return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
    u64 tmp;

    spin_lock(&ctx->csa.register_lock);
    tmp = ctx->csa.priv2.spu_cfg_RW;
    if (val)
        tmp |= 2;
    else
        tmp &= ~2;
    ctx->csa.priv2.spu_cfg_RW = tmp;
    spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
    return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

static u32 spu_backing_npc_read(struct spu_context *ctx)
{
    return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
    ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
    return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
    return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
    ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
    return ctx->csa.prob.spu_runcntl_RW;
}

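/*
 * Writing the run control word of a saved context also emulates the
 * status transitions the hardware would make: starting clears the
 * stop/halt/error bits and sets SPU_STATUS_RUNNING; stopping clears
 * SPU_STATUS_RUNNING again.
 */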
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
    spin_lock(&ctx->csa.register_lock);
    ctx->csa.prob.spu_runcntl_RW = val;
    if (val & SPU_RUNCNTL_RUNNABLE) {
        ctx->csa.prob.spu_status_R &=
            ~SPU_STATUS_STOPPED_BY_STOP &
            ~SPU_STATUS_STOPPED_BY_HALT &
            ~SPU_STATUS_SINGLE_STEP &
            ~SPU_STATUS_INVALID_INSTR &
            ~SPU_STATUS_INVALID_CH;
        ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
    } else {
        ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
    }
    spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
    spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

static void spu_backing_master_start(struct spu_context *ctx)
{
    struct spu_state *csa = &ctx->csa;
    u64 sr1;

    spin_lock(&csa->register_lock);
    sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
    csa->priv1.mfc_sr1_RW = sr1;
    spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
    struct spu_state *csa = &ctx->csa;
    u64 sr1;

    spin_lock(&csa->register_lock);
    sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
    csa->priv1.mfc_sr1_RW = sr1;
    spin_unlock(&csa->register_lock);
}

static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
                                     u32 mode)
{
    struct spu_problem_collapsed *prob = &ctx->csa.prob;
    int ret;

    spin_lock(&ctx->csa.register_lock);
    ret = -EAGAIN;
    if (prob->dma_querytype_RW)
        goto out;
    ret = 0;
    /* FIXME: what are the side-effects of this? */
    prob->dma_querymask_RW = mask;
    prob->dma_querytype_RW = mode;
    /* In the current implementation, the SPU context is always
     * acquired in runnable state when new bits are added to the
     * mask (tagwait), so it's sufficient just to mask
     * dma_tagstatus_R with the 'mask' parameter here.
     */
    ctx->csa.prob.dma_tagstatus_R &= mask;
out:
    spin_unlock(&ctx->csa.register_lock);

    return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
    return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
    return ctx->csa.prob.dma_qstatus_R;
}

static int spu_backing_send_mfc_command(struct spu_context *ctx,
                                        struct mfc_dma_command *cmd)
{
    int ret;

    spin_lock(&ctx->csa.register_lock);
    ret = -EAGAIN;
    /* FIXME: set up priv2->puq */
    spin_unlock(&ctx->csa.register_lock);

    return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
    ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

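/*
 * Backing-store implementation of the spu_context_ops interface; its
 * counterpart that drives real hardware is spu_hw_ops in hw_ops.c.
 */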
struct spu_context_ops spu_backing_ops = {
    .mbox_read = spu_backing_mbox_read,
    .mbox_stat_read = spu_backing_mbox_stat_read,
    .mbox_stat_poll = spu_backing_mbox_stat_poll,
    .ibox_read = spu_backing_ibox_read,
    .wbox_write = spu_backing_wbox_write,
    .signal1_read = spu_backing_signal1_read,
    .signal1_write = spu_backing_signal1_write,
    .signal2_read = spu_backing_signal2_read,
    .signal2_write = spu_backing_signal2_write,
    .signal1_type_set = spu_backing_signal1_type_set,
    .signal1_type_get = spu_backing_signal1_type_get,
    .signal2_type_set = spu_backing_signal2_type_set,
    .signal2_type_get = spu_backing_signal2_type_get,
    .npc_read = spu_backing_npc_read,
    .npc_write = spu_backing_npc_write,
    .status_read = spu_backing_status_read,
    .get_ls = spu_backing_get_ls,
    .privcntl_write = spu_backing_privcntl_write,
    .runcntl_read = spu_backing_runcntl_read,
    .runcntl_write = spu_backing_runcntl_write,
    .runcntl_stop = spu_backing_runcntl_stop,
    .master_start = spu_backing_master_start,
    .master_stop = spu_backing_master_stop,
    .set_mfc_query = spu_backing_set_mfc_query,
    .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
    .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
    .send_mfc_command = spu_backing_send_mfc_command,
    .restart_dma = spu_backing_restart_dma,
};