/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>

/* Character device major number, allocated in vpe_module_init() */
static int major;

/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;

/* We are prepared, so configure and start the VPE... */
int vpe_run(struct vpe *v)
{
    unsigned long flags, val, dmt_flag;
    struct vpe_notifications *notifier;
    unsigned int vpeflags;
    struct tc *t;

    /* check we are the Master VPE */
    local_irq_save(flags);
    val = read_c0_vpeconf0();
    if (!(val & VPECONF0_MVP)) {
        pr_warn("VPE loader: only Master VPEs are able to config MT\n");
        local_irq_restore(flags);

        return -1;
    }

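    /*
     * dmt() disables multithreading within this VPE and dvpe() disables
     * multi-VPE execution across the core; each returns the previous state
     * so it can be restored with emt()/evpe() once the target VPE has been
     * configured.
     */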
    dmt_flag = dmt();
    vpeflags = dvpe();

    if (list_empty(&v->tc)) {
        evpe(vpeflags);
        emt(dmt_flag);
        local_irq_restore(flags);

        pr_warn("VPE loader: No TCs associated with VPE %d\n",
            v->minor);

        return -ENOEXEC;
    }

    t = list_first_entry(&v->tc, struct tc, tc);

    /* Put MVPEs into 'configuration state' */
    set_c0_mvpcontrol(MVPCONTROL_VPC);

    settc(t->index);

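    /*
     * With MVPCONTROL_VPC set, the per-TC and per-VPE configuration
     * registers become writable; settc() selects which TC the read_tc_*
     * and write_tc_* accessors below operate on.
     */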
    /* The TC must be halted and not yet activated; bail out otherwise */
    if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
       !(read_tc_c0_tchalt() & TCHALT_H)) {
        evpe(vpeflags);
        emt(dmt_flag);
        local_irq_restore(flags);

        pr_warn("VPE loader: TC %d is already active!\n",
            t->index);

        return -ENOEXEC;
    }

    /*
     * Write the address we want it to start running from in the TCPC
     * register.
     */
    write_tc_c0_tcrestart((unsigned long)v->__start);
    write_tc_c0_tccontext((unsigned long)0);

    /*
     * Mark the TC as activated, not interrupt exempt and not dynamically
     * allocatable
     */
    val = read_tc_c0_tcstatus();
    val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
    write_tc_c0_tcstatus(val);

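    /*
     * Clearing the halt (H) bit releases the TC; it will begin fetching
     * from the TCRestart address written above once its VPE is activated.
     */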
    write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

    /*
     * The sde-kit passes 'memsize' to __start in $a3, so set something
     * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
     * DFLT_HEAP_SIZE when you compile your program
     */
    mttgpr(6, v->ntcs);          /* $a2: number of TCs available to the program */
    mttgpr(7, physical_memsize); /* $a3: memsize */

    /* set up VPE1 */
    /*
     * bind the TC to VPE 1 as late as possible so we only have the final
     * VPE registers to set up, and so an EJTAG probe can trigger on it
     */
    write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

    write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

    back_to_back_c0_hazard();

    /* Set up the XTC field in vpeconf0 to point at our tc */
    write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
                  | (t->index << VPECONF0_XTC_SHIFT));

    back_to_back_c0_hazard();

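    /*
     * XTC selects the exclusive TC for this VPE, i.e. the TC that runs
     * when the VPE's TE (threads enabled) bit is clear, as it is left by
     * vpe_module_init() below.
     */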
    /* enable this VPE */
    write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

    /* clear out any leftovers from a previous program */
    write_vpe_c0_status(0);
    write_vpe_c0_cause(0);

    /* take system out of configuration state */
    clear_c0_mvpcontrol(MVPCONTROL_VPC);

    /*
     * SMVP kernels manage VPE enable independently, but uniprocessor
     * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
     */
#ifdef CONFIG_SMP
    evpe(vpeflags);
#else
    evpe(EVPE_ENABLE);
#endif
    emt(dmt_flag);
    local_irq_restore(flags);

    list_for_each_entry(notifier, &v->notify, list)
        notifier->start(VPE_MODULE_MINOR);

    return 0;
}

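/*
 * Halt a TC and return it to the unallocated, interrupt-exempt state so that
 * it can be reused by a later program.
 */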
void cleanup_tc(struct tc *tc)
{
    unsigned long flags;
    unsigned int mtflags, vpflags;
    int tmp;

    local_irq_save(flags);
    mtflags = dmt();
    vpflags = dvpe();
    /* Put MVPEs into 'configuration state' */
    set_c0_mvpcontrol(MVPCONTROL_VPC);

    settc(tc->index);
    tmp = read_tc_c0_tcstatus();

    /* mark not allocated and not dynamically allocatable */
    tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
    tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
    write_tc_c0_tcstatus(tmp);

    write_tc_c0_tchalt(TCHALT_H);
    mips_ihb();

    clear_c0_mvpcontrol(MVPCONTROL_VPC);
    evpe(vpflags);
    emt(mtflags);
    local_irq_restore(flags);
}

/* module wrapper entry points */
/* give me a vpe */
void *vpe_alloc(void)
{
    int i;
    struct vpe *v;

    /* find a vpe */
    for (i = 1; i < MAX_VPES; i++) {
        v = get_vpe(i);
        if (v != NULL) {
            v->state = VPE_STATE_INUSE;
            return v;
        }
    }
    return NULL;
}
EXPORT_SYMBOL(vpe_alloc);

/* start running from here */
int vpe_start(void *vpe, unsigned long start)
{
    struct vpe *v = vpe;

    v->__start = start;
    return vpe_run(v);
}
EXPORT_SYMBOL(vpe_start);

/* halt it for now */
int vpe_stop(void *vpe)
{
    struct vpe *v = vpe;
    struct tc *t;
    unsigned int evpe_flags;

    evpe_flags = dvpe();

    t = list_entry(v->tc.next, struct tc, tc);
    if (t != NULL) {
        settc(t->index);
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
    }

    evpe(evpe_flags);

    return 0;
}
EXPORT_SYMBOL(vpe_stop);

/* I'm done with it, thank you */
int vpe_free(void *vpe)
{
    struct vpe *v = vpe;
    struct tc *t;
    unsigned int evpe_flags;

    t = list_entry(v->tc.next, struct tc, tc);
    if (t == NULL)
        return -ENOEXEC;

    evpe_flags = dvpe();

    /* Put MVPEs into 'configuration state' */
    set_c0_mvpcontrol(MVPCONTROL_VPC);

    settc(t->index);
    write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);

    /* halt the TC */
    write_tc_c0_tchalt(TCHALT_H);
    mips_ihb();

    /* mark the TC unallocated */
    write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);

    v->state = VPE_STATE_UNUSED;

    clear_c0_mvpcontrol(MVPCONTROL_VPC);
    evpe(evpe_flags);

    return 0;
}
EXPORT_SYMBOL(vpe_free);

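/*
 * A minimal sketch of how a client module might drive the exported API
 * above; the firmware loading step and error handling are omitted, and
 * 'fw_entry' is a purely illustrative entry-point address.
 *
 *    void *v = vpe_alloc();
 *
 *    if (v && vpe_start(v, fw_entry) == 0) {
 *        ... the AP/SP program is now running on the second VPE ...
 *        vpe_stop(v);
 *        vpe_free(v);
 *    }
 */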
/* Writing to the 'kill' sysfs attribute tears down the running AP/SP program */
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t len)
{
    struct vpe *vpe = get_vpe(aprp_cpu_index());
    struct vpe_notifications *notifier;

    list_for_each_entry(notifier, &vpe->notify, list)
        notifier->stop(aprp_cpu_index());

    release_progmem(vpe->load_addr);
    cleanup_tc(get_tc(aprp_cpu_index()));
    vpe_stop(vpe);
    vpe_free(vpe);

    return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);

static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
             char *buf)
{
    struct vpe *vpe = get_vpe(aprp_cpu_index());

    return sprintf(buf, "%d\n", vpe->ntcs);
}

static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
              const char *buf, size_t len)
{
    struct vpe *vpe = get_vpe(aprp_cpu_index());
    unsigned long new;
    int ret;

    ret = kstrtoul(buf, 0, &new);
    if (ret < 0)
        return ret;

    if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
        return -EINVAL;

    vpe->ntcs = new;

    return len;
}
static DEVICE_ATTR_RW(ntcs);

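/*
 * Both attributes belong to the "vpe1" device registered below, so they are
 * reachable as /sys/class/vpe/vpe1/{kill,ntcs} on a typical sysfs layout.
 */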
static struct attribute *vpe_attrs[] = {
    &dev_attr_kill.attr,
    &dev_attr_ntcs.attr,
    NULL,
};
ATTRIBUTE_GROUPS(vpe);

static void vpe_device_release(struct device *cd)
{
    kfree(cd);
}

static struct class vpe_class = {
    .name = "vpe",
    .owner = THIS_MODULE,
    .dev_release = vpe_device_release,
    .dev_groups = vpe_groups,
};

static struct device vpe_device;

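/*
 * Register the character device, class and "vpe1" device, then walk the TCs
 * from aprp_cpu_index() upwards, halting them and reserving them (and their
 * VPEs) for AP/SP use.
 */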
int __init vpe_module_init(void)
{
    unsigned int mtflags, vpflags;
    unsigned long flags, val;
    struct vpe *v = NULL;
    struct tc *t;
    int tc, err;

    if (!cpu_has_mipsmt) {
        pr_warn("VPE loader: not a MIPS MT capable processor\n");
        return -ENODEV;
    }

    if (vpelimit == 0) {
        pr_warn("No VPEs reserved for AP/SP, not initializing VPE loader\n"
            "Pass maxvpes=<n> as a kernel argument\n");

        return -ENODEV;
    }

    if (aprp_cpu_index() == 0) {
        pr_warn("No TCs reserved for AP/SP, not initializing VPE loader\n"
            "Pass maxtcs=<n> as a kernel argument\n");

        return -ENODEV;
    }

    major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
    if (major < 0) {
        pr_warn("VPE loader: unable to register character device\n");
        return major;
    }

    err = class_register(&vpe_class);
    if (err) {
        pr_err("vpe_class registration failed\n");
        goto out_chrdev;
    }

    device_initialize(&vpe_device);
    vpe_device.class    = &vpe_class;
    vpe_device.parent   = NULL;
    dev_set_name(&vpe_device, "vpe1");
    vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
    err = device_add(&vpe_device);
    if (err) {
        pr_err("Adding vpe_device failed\n");
        goto out_class;
    }

    local_irq_save(flags);
    mtflags = dmt();
    vpflags = dvpe();

    /* Put MVPEs into 'configuration state' */
    set_c0_mvpcontrol(MVPCONTROL_VPC);

    val = read_c0_mvpconf0();
    hw_tcs = (val & MVPCONF0_PTC) + 1;
    hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

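    /*
     * MVPConf0.PTC and MVPConf0.PVPE hold the highest TC and VPE numbers
     * (i.e. count - 1), hence the "+ 1" above.
     */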
    for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
        /*
         * Must temporarily re-enable multithreading here; otherwise, if
         * we reschedule, send IPIs or similar, we might hang.
         */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);
        evpe(vpflags);
        emt(mtflags);
        local_irq_restore(flags);
        t = alloc_tc(tc);
        if (!t) {
            err = -ENOMEM;
            goto out_dev;
        }

        local_irq_save(flags);
        mtflags = dmt();
        vpflags = dvpe();
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        /* VPEs */
        if (tc < hw_tcs) {
            settc(tc);

            v = alloc_vpe(tc);
            if (v == NULL) {
                pr_warn("VPE: unable to allocate VPE\n");
                goto out_reenable;
            }

            v->ntcs = hw_tcs - aprp_cpu_index();

            /* add the TC to this VPE's list of TCs */
            list_add(&t->tc, &v->tc);

            /* deactivate all but vpe0 */
            if (tc >= aprp_cpu_index()) {
                unsigned long tmp = read_vpe_c0_vpeconf0();

                tmp &= ~VPECONF0_VPA;

                /* master VPE */
                tmp |= VPECONF0_MVP;
                write_vpe_c0_vpeconf0(tmp);
            }

            /* disable multi-threading with TCs */
            write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
                        ~VPECONTROL_TE);

            if (tc >= vpelimit) {
                /*
                 * Set config to be the same as vpe0,
                 * particularly kseg0 coherency alg
                 */
                write_vpe_c0_config(read_c0_config());
            }
        }

        /* TCs */
        t->pvpe = v;    /* set the parent vpe */

        if (tc >= aprp_cpu_index()) {
            unsigned long tmp;

            settc(tc);

            /*
             * A TC that is bound to any other VPE gets bound to
             * VPE0 instead; ideally I'd like to make it homeless,
             * but the core doesn't appear to allow binding a TC
             * to a non-existent VPE, which is perfectly
             * reasonable.
             *
             * The (un)bound state is visible to an EJTAG probe so
             * may notify GDB...
             */
            tmp = read_tc_c0_tcbind();
            if (tmp & TCBIND_CURVPE) {
                /* TC is currently bound to a VPE other than VPE0 */
                write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

                t->pvpe = get_vpe(0);   /* set the parent vpe */
            }

            /* halt the TC */
            write_tc_c0_tchalt(TCHALT_H);
            mips_ihb();

            tmp = read_tc_c0_tcstatus();

            /* mark not activated and not dynamically allocatable */
            tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
            tmp |= TCSTATUS_IXMT;   /* interrupt exempt */
            write_tc_c0_tcstatus(tmp);
        }
    }

out_reenable:
    /* release config state */
    clear_c0_mvpcontrol(MVPCONTROL_VPC);

    evpe(vpflags);
    emt(mtflags);
    local_irq_restore(flags);

    return 0;

out_dev:
    device_del(&vpe_device);

out_class:
    class_unregister(&vpe_class);

out_chrdev:
    unregister_chrdev(major, VPE_MODULE_NAME);

    return err;
}

void __exit vpe_module_exit(void)
{
    struct vpe *v, *n;

    device_del(&vpe_device);
    class_unregister(&vpe_class);
    unregister_chrdev(major, VPE_MODULE_NAME);

    /* No locking needed here */
    list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
        if (v->state != VPE_STATE_UNUSED)
            release_vpe(v);
    }
}