/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/timer.h>

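/*
 * NV44 packs four page-table entries into each 16-byte group: every PTE
 * is a 27-bit value (page address >> 12) spread across the group's four
 * 32-bit words, and 0x40000000 is OR'd into the last word of each group
 * (presumably a valid/enable bit for the group; the hardware meaning is
 * not documented here).
 *
 * nv44_vmm_pgt_fill() updates a subset of one group by read-modify-write:
 * it reads the group's four words back, splices in the new entries, and
 * writes the whole group out again.  A NULL list points the entries at
 * vmm->null, the scratch page allocated in nv44_vmm_new().
 */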
static void
nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
          dma_addr_t *list, u32 ptei, u32 ptes)
{
    u32 pteo = (ptei << 2) & ~0x0000000f;
    u32 tmp[4];

    tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
    tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
    tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
    tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);

    while (ptes--) {
        u32 addr = (list ? *list++ : vmm->null) >> 12;
        switch (ptei++ & 0x3) {
        case 0:
            tmp[0] &= ~0x07ffffff;
            tmp[0] |= addr;
            break;
        case 1:
            tmp[0] &= ~0xf8000000;
            tmp[0] |= addr << 27;
            tmp[1] &= ~0x003fffff;
            tmp[1] |= addr >> 5;
            break;
        case 2:
            tmp[1] &= ~0xffc00000;
            tmp[1] |= addr << 22;
            tmp[2] &= ~0x0001ffff;
            tmp[2] |= addr >> 10;
            break;
        case 3:
            tmp[2] &= ~0xfffe0000;
            tmp[2] |= addr << 17;
            tmp[3] &= ~0x00000fff;
            tmp[3] |= addr >> 15;
            break;
        }
    }

    VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
    VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
    VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
    VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
}

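/*
 * Write PTEs for a physically-contiguous range starting at 'addr'.  A
 * misaligned head and tail go through nv44_vmm_pgt_fill(); whole groups
 * of four entries are packed and written directly.
 */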
static void
nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
         u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
    dma_addr_t tmp[4], i;

    if (ptei & 3) {
        const u32 pten = min(ptes, 4 - (ptei & 3));
        for (i = 0; i < pten; i++, addr += 0x1000)
            tmp[i] = addr;
        nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
        ptei += pten;
        ptes -= pten;
    }

    while (ptes >= 4) {
        for (i = 0; i < 4; i++, addr += 0x1000)
            tmp[i] = addr >> 12;
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
        ptes -= 4;
    }

    if (ptes) {
        for (i = 0; i < ptes; i++, addr += 0x1000)
            tmp[i] = addr;
        nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
    }
}

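/* Map from a scatter-gather list via the common iterator. */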
static void
nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
         u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
    VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
}

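/*
 * Map from an array of DMA addresses.  When the CPU page size matches the
 * GPU's 4KiB pages, the addresses can be consumed four at a time on the
 * fast path below; otherwise fall back to the generic iterator.
 */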
static void
nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
         u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
    nvkm_kmap(pt->memory);
    if (ptei & 3) {
        const u32 pten = min(ptes, 4 - (ptei & 3));
        nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
        ptei += pten;
        ptes -= pten;
        map->dma += pten;
    }

    while (ptes >= 4) {
        u32 tmp[4], i;
        for (i = 0; i < 4; i++)
            tmp[i] = *map->dma++ >> 12;
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
        VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
        ptes -= 4;
    }

    if (ptes) {
        nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
        map->dma += ptes;
    }
    nvkm_done(pt->memory);
#else
    VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
#endif
}

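/*
 * Unmap PTEs.  Unaligned head and trailing entries are pointed back at
 * the scratch page via nv44_vmm_pgt_fill(NULL); the aligned groups in
 * between are simply zeroed.
 */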
static void
nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
           struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
    nvkm_kmap(pt->memory);
    if (ptei & 3) {
        const u32 pten = min(ptes, 4 - (ptei & 3));
        nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
        ptei += pten;
        ptes -= pten;
    }

    while (ptes > 4) {
        VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
        VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
        VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
        VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
        ptes -= 4;
    }

    if (ptes)
        nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
    nvkm_done(pt->memory);
}

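/* Operations for the single level of NV44 page tables. */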
static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
    .unmap = nv44_vmm_pgt_unmap,
    .dma = nv44_vmm_pgt_dma,
    .sgl = nv44_vmm_pgt_sgl,
};

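/*
 * One-level page table for 4KiB (1 << 12) pages: reading the descriptor
 * fields as index bits, bytes per PTE, and alignment, that is 17 index
 * bits (128Ki PTEs at 4 bytes each) with the PT aligned to its
 * 0x80000-byte size.
 */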
static const struct nvkm_vmm_desc
nv44_vmm_desc_12[] = {
    { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
    {}
};

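/*
 * TLB flush: program the VM limit, poke the flush trigger at 0x100808,
 * then poll (for up to 2ms) until the hardware reports completion in
 * bit 0 of the same register.
 */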
static void
nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
{
    struct nvkm_device *device = vmm->mmu->subdev.device;
    nvkm_wr32(device, 0x100814, vmm->limit - 4096);
    nvkm_wr32(device, 0x100808, 0x00000020);
    nvkm_msec(device, 2000,
        if (nvkm_rd32(device, 0x100808) & 0x00000001)
            break;
    );
    nvkm_wr32(device, 0x100808, 0x00000000);
}

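/* NV44 exposes a single 4KiB, host-backed page size. */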
static const struct nvkm_vmm_func
nv44_vmm = {
    .valid = nv04_vmm_valid,
    .flush = nv44_vmm_flush,
    .page = {
        { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
        {}
    }
};

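/*
 * Create the VMM, then allocate 16KiB of coherent memory to serve as the
 * dummy (scratch) pages that unmapped PTEs are pointed at.  Failure to
 * allocate is non-fatal; vmm->null is left at 0.
 */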
int
nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
         void *argv, u32 argc, struct lock_class_key *key, const char *name,
         struct nvkm_vmm **pvmm)
{
    struct nvkm_subdev *subdev = &mmu->subdev;
    struct nvkm_vmm *vmm;
    int ret;

    ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
                argv, argc, key, name, &vmm);
    *pvmm = vmm;
    if (ret)
        return ret;

    vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
                    &vmm->null, GFP_KERNEL);
    if (!vmm->nullp) {
        nvkm_warn(subdev, "unable to allocate dummy pages\n");
        vmm->null = 0;
    }

    return 0;
}