/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

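/*
 * Load code into the falcon's instruction memory (IMEM) through the given
 * IMEM port. The control write at 0x180 carries the start offset, BIT(24)
 * (auto-increment of the write pointer) and BIT(28) when the upload is
 * secure. A new tag is written every 64 words (256 bytes), a trailing
 * partial word is masked, and the upload is zero-padded to a 256-byte block.
 */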
void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
             u32 size, u16 tag, u8 port, bool secure)
{
    u8 rem = size % 4;
    u32 reg;
    int i;

    size -= rem;

    reg = start | BIT(24) | (secure ? BIT(28) : 0);
    nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
    for (i = 0; i < size / 4; i++) {
        /* write new tag every 256B */
        if ((i & 0x3f) == 0)
            nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
        nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
    }

    /*
     * If size is not a multiple of 4, mask the last word to ensure garbage
     * does not get written
     */
    if (rem) {
        u32 extra = ((u32 *)data)[i];

        /* write new tag every 256B */
        if ((i & 0x3f) == 0)
            nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
        nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
                 extra & (BIT(rem * 8) - 1));
        ++i;
    }

    /* code must be padded to 0x40 words */
    for (; i & 0x3f; i++)
        nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

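/*
 * Load data into the falcon's external memory (EMEM) through the port
 * registers at 0xac0/0xac4, with BIT(24) requesting auto-increment writes.
 * A trailing partial word is masked so no garbage bytes are written.
 */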
static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
             u32 size, u8 port)
{
    u8 rem = size % 4;
    int i;

    size -= rem;

    nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
    for (i = 0; i < size / 4; i++)
        nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

    /*
     * If size is not a multiple of 4, mask the last word to ensure garbage
     * does not get written
     */
    if (rem) {
        u32 extra = ((u32 *)data)[i];

        nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
                 extra & (BIT(rem * 8) - 1));
    }
}

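/*
 * Load data into the falcon's data memory (DMEM) through the port registers
 * at 0x1c0/0x1c4. If the falcon implements EMEM and the destination address
 * lies at or above func->emem_addr, the transfer is redirected to EMEM.
 */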
void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
             u32 size, u8 port)
{
    const struct nvkm_falcon_func *func = falcon->func;
    u8 rem = size % 4;
    int i;

    if (func->emem_addr && start >= func->emem_addr)
        return nvkm_falcon_v1_load_emem(falcon, data,
                        start - func->emem_addr, size,
                        port);

    size -= rem;

    nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
    for (i = 0; i < size / 4; i++)
        nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

    /*
     * If size is not a multiple of 4, mask the last word to ensure garbage
     * does not get written
     */
    if (rem) {
        u32 extra = ((u32 *)data)[i];

        nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
                 extra & (BIT(rem * 8) - 1));
    }
}

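/*
 * Read data back from EMEM through the port registers at 0xac0/0xac4, with
 * BIT(25) requesting auto-increment reads. Trailing bytes of a partial word
 * are copied out one at a time.
 */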
static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
             u8 port, void *data)
{
    u8 rem = size % 4;
    int i;

    size -= rem;

    nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
    for (i = 0; i < size / 4; i++)
        ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

    /*
     * If size is not a multiple of 4, mask the last word to ensure garbage
     * does not get read
     */
    if (rem) {
        u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

        for (i = size; i < size + rem; i++) {
            ((u8 *)data)[i] = (u8)(extra & 0xff);
            extra >>= 8;
        }
    }
}

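/*
 * Read data back from DMEM through the port registers at 0x1c0/0x1c4,
 * redirecting to EMEM when the source address lies at or above
 * func->emem_addr.
 */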
void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
             u8 port, void *data)
{
    const struct nvkm_falcon_func *func = falcon->func;
    u8 rem = size % 4;
    int i;

    if (func->emem_addr && start >= func->emem_addr)
        return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
                        size, port, data);

    size -= rem;

    nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
    for (i = 0; i < size / 4; i++)
        ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

    /*
     * If size is not a multiple of 4, mask the last word to ensure garbage
     * does not get read
     */
    if (rem) {
        u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

        for (i = size; i < size + rem; i++) {
            ((u8 *)data)[i] = (u8)(extra & 0xff);
            extra >>= 8;
        }
    }
}

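/*
 * Bind (or, when ctx is NULL, unbind) an instance block to the falcon.
 * The FBIF apertures are programmed for the UCODE/VIRT/PHYS DMA indices,
 * the instance block's target memory and address are written to 0x054,
 * and the context is enabled via 0x048, 0x090 and 0x0a4.
 */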
void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
    const u32 fbif = falcon->func->fbif;
    u32 inst_loc;

    /* disable instance block binding */
    if (ctx == NULL) {
        nvkm_falcon_wr32(falcon, 0x10c, 0x0);
        return;
    }

    nvkm_falcon_wr32(falcon, 0x10c, 0x1);

    /* setup apertures - virtual */
    nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
    nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
    /* setup apertures - physical */
    nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
    nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
    nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

    /* Set context */
    switch (nvkm_memory_target(ctx)) {
    case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
    case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
    case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
    default:
        WARN_ON(1);
        return;
    }

    /* Enable context */
    nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
    nvkm_falcon_wr32(falcon, 0x054,
             ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
             (inst_loc << 28) | (1 << 30));

    nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
    nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}

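/* Program the address the falcon will start executing from (register 0x104). */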
void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
    nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

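/*
 * Start the falcon CPU. If bit 6 of register 0x100 is set, the start request
 * is issued through register 0x130 instead of 0x100.
 */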
void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
    u32 reg = nvkm_falcon_rd32(falcon, 0x100);

    if (reg & BIT(6))
        nvkm_falcon_wr32(falcon, 0x130, 0x2);
    else
        nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

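/* Wait up to 'ms' milliseconds for the halt bit (0x10) in register 0x100 to be set. */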
int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
    struct nvkm_device *device = falcon->owner->device;
    int ret;

    ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
    if (ret < 0)
        return ret;

    return 0;
}

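/*
 * Clear the interrupts selected by 'mask' and wait up to 10ms for the
 * corresponding bits in the interrupt status register (0x008) to drop.
 */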
int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
    struct nvkm_device *device = falcon->owner->device;
    int ret;

    /* clear interrupt(s) */
    nvkm_falcon_mask(falcon, 0x004, mask, mask);
    /* wait until interrupts are cleared */
    ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
    if (ret < 0)
        return ret;

    return 0;
}

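/* Wait up to 10ms for the low 16 bits of register 0x04c to clear, i.e. for the falcon to go idle. */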
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
    struct nvkm_device *device = falcon->owner->device;
    int ret;

    ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
    if (ret < 0)
        return ret;

    return 0;
}

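/*
 * Enable the falcon: wait for memory scrubbing to complete (bits 1-2 of
 * register 0x10c), wait for the unit to go idle, then enable its IRQs.
 */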
int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
    struct nvkm_device *device = falcon->owner->device;
    int ret;

    ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
    if (ret < 0) {
        nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
        return ret;
    }

    ret = falcon_v1_wait_idle(falcon);
    if (ret)
        return ret;

    /* enable IRQs */
    nvkm_falcon_wr32(falcon, 0x010, 0xff);

    return 0;
}

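/* Disable the falcon's IRQs and wait for any code it is running to go idle. */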
void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
    /* disable IRQs and wait for any previous code to complete */
    nvkm_falcon_wr32(falcon, 0x014, 0xff);
    falcon_v1_wait_idle(falcon);
}