0001 /*
0002  * Copyright 2012 Red Hat Inc.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  * Authors: Ben Skeggs
0023  */
0024 #include "priv.h"
0025 
0026 #include <subdev/bar.h>
0027 
0028 /******************************************************************************
0029  * instmem object base implementation
0030  *****************************************************************************/
0031 static void
0032 nvkm_instobj_load(struct nvkm_instobj *iobj)
0033 {
0034     struct nvkm_memory *memory = &iobj->memory;
0035     const u64 size = nvkm_memory_size(memory);
0036     void __iomem *map;
0037     int i;
0038 
0039     if (!(map = nvkm_kmap(memory))) {
0040         for (i = 0; i < size; i += 4)
0041             nvkm_wo32(memory, i, iobj->suspend[i / 4]);
0042     } else {
0043         memcpy_toio(map, iobj->suspend, size);
0044     }
0045     nvkm_done(memory);
0046 
0047     kvfree(iobj->suspend);
0048     iobj->suspend = NULL;
0049 }
0050 
0051 static int
0052 nvkm_instobj_save(struct nvkm_instobj *iobj)
0053 {
0054     struct nvkm_memory *memory = &iobj->memory;
0055     const u64 size = nvkm_memory_size(memory);
0056     void __iomem *map;
0057     int i;
0058 
0059     iobj->suspend = kvmalloc(size, GFP_KERNEL);
0060     if (!iobj->suspend)
0061         return -ENOMEM;
0062 
0063     if (!(map = nvkm_kmap(memory))) {
0064         for (i = 0; i < size; i += 4)
0065             iobj->suspend[i / 4] = nvkm_ro32(memory, i);
0066     } else {
0067         memcpy_fromio(iobj->suspend, map, size);
0068     }
0069     nvkm_done(memory);
0070     return 0;
0071 }
0072 
/* Remove an instance object from its instmem tracking list.
 *
 * Counterpart to nvkm_instobj_ctor(); imem->lock serialises against
 * concurrent ctor/dtor and the list walk in nvkm_instmem_boot().
 */
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
    spin_lock(&imem->lock);
    list_del(&iobj->head);
    spin_unlock(&imem->lock);
}
0080 
/* Common constructor for instance objects: initialise the embedded
 * nvkm_memory with the backend's function table and add the object to
 * the instmem tracking list (used for save/restore across suspend).
 *
 * imem->lock serialises the list insertion against concurrent
 * ctor/dtor and nvkm_instmem_boot().
 */
void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
          struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
    nvkm_memory_ctor(func, &iobj->memory);
    iobj->suspend = NULL;    /* no saved contents until first suspend */
    spin_lock(&imem->lock);
    list_add_tail(&iobj->head, &imem->list);
    spin_unlock(&imem->lock);
}
0091 
0092 int
0093 nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
0094          struct nvkm_memory **pmemory)
0095 {
0096     struct nvkm_subdev *subdev = &imem->subdev;
0097     struct nvkm_memory *memory = NULL;
0098     u32 offset;
0099     int ret;
0100 
0101     ret = imem->func->memory_new(imem, size, align, zero, &memory);
0102     if (ret) {
0103         nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
0104         goto done;
0105     }
0106 
0107     nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
0108            zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
0109 
0110     if (!imem->func->zero && zero) {
0111         void __iomem *map = nvkm_kmap(memory);
0112         if (unlikely(!map)) {
0113             for (offset = 0; offset < size; offset += 4)
0114                 nvkm_wo32(memory, offset, 0x00000000);
0115         } else {
0116             memset_io(map, 0x00, size);
0117         }
0118         nvkm_done(memory);
0119     }
0120 
0121 done:
0122     if (ret)
0123         nvkm_memory_unref(&memory);
0124     *pmemory = memory;
0125     return ret;
0126 }
0127 
0128 /******************************************************************************
0129  * instmem subdev base implementation
0130  *****************************************************************************/
0131 
0132 u32
0133 nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
0134 {
0135     return imem->func->rd32(imem, addr);
0136 }
0137 
0138 void
0139 nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
0140 {
0141     return imem->func->wr32(imem, addr, data);
0142 }
0143 
0144 void
0145 nvkm_instmem_boot(struct nvkm_instmem *imem)
0146 {
0147     /* Separate bootstrapped objects from normal list, as we need
0148      * to make sure they're accessed with the slowpath on suspend
0149      * and resume.
0150      */
0151     struct nvkm_instobj *iobj, *itmp;
0152     spin_lock(&imem->lock);
0153     list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
0154         list_move_tail(&iobj->head, &imem->boot);
0155     }
0156     spin_unlock(&imem->lock);
0157 }
0158 
/* Subdev fini handler: quiesce instmem, snapshotting object contents
 * when suspending.
 *
 * Ordering matters on suspend: normal objects are saved first (while
 * BAR2 is still up, so the fast mapping path in nvkm_instobj_save()
 * can be used), then BAR2 is torn down, and only then are the
 * bootstrapped objects saved — per the note in nvkm_instmem_boot(),
 * those must go through the slow path.
 *
 * NOTE(review): if a save fails partway, objects saved so far keep
 * their suspend buffers (freed on the next nvkm_instobj_load()) —
 * presumably acceptable since suspend is being aborted; confirm.
 *
 * Returns 0 on success or a negative error code from saving.
 */
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
    struct nvkm_instmem *imem = nvkm_instmem(subdev);
    struct nvkm_instobj *iobj;

    if (suspend) {
        list_for_each_entry(iobj, &imem->list, head) {
            int ret = nvkm_instobj_save(iobj);
            if (ret)
                return ret;
        }

        nvkm_bar_bar2_fini(subdev->device);

        list_for_each_entry(iobj, &imem->boot, head) {
            int ret = nvkm_instobj_save(iobj);
            if (ret)
                return ret;
        }
    }

    /* Optional backend-specific teardown. */
    if (imem->func->fini)
        imem->func->fini(imem);

    return 0;
}
0186 
/* Subdev init handler: restore object contents saved at suspend.
 *
 * Mirror image of nvkm_instmem_fini(): bootstrapped objects are
 * restored first, via the slow path, before BAR2 is brought back up;
 * normal objects are restored afterwards so nvkm_instobj_load() can
 * use the fast mapping path.  Objects with no suspend snapshot
 * (iobj->suspend == NULL) are skipped.
 */
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
    struct nvkm_instmem *imem = nvkm_instmem(subdev);
    struct nvkm_instobj *iobj;

    list_for_each_entry(iobj, &imem->boot, head) {
        if (iobj->suspend)
            nvkm_instobj_load(iobj);
    }

    nvkm_bar_bar2_init(subdev->device);

    list_for_each_entry(iobj, &imem->list, head) {
        if (iobj->suspend)
            nvkm_instobj_load(iobj);
    }

    return 0;
}
0207 
0208 static int
0209 nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
0210 {
0211     struct nvkm_instmem *imem = nvkm_instmem(subdev);
0212     if (imem->func->oneinit)
0213         return imem->func->oneinit(imem);
0214     return 0;
0215 }
0216 
0217 static void *
0218 nvkm_instmem_dtor(struct nvkm_subdev *subdev)
0219 {
0220     struct nvkm_instmem *imem = nvkm_instmem(subdev);
0221     void *data = imem;
0222     if (imem->func->dtor)
0223         data = imem->func->dtor(imem);
0224     mutex_destroy(&imem->mutex);
0225     return data;
0226 }
0227 
/* Function table hooking instmem into the common subdev lifecycle
 * (dtor/oneinit/init/fini), passed to nvkm_subdev_ctor() below.
 */
static const struct nvkm_subdev_func
nvkm_instmem = {
    .dtor = nvkm_instmem_dtor,
    .oneinit = nvkm_instmem_oneinit,
    .init = nvkm_instmem_init,
    .fini = nvkm_instmem_fini,
};
0235 
/* Common constructor for instmem backends: register the subdev with
 * the device and initialise shared state — the backend function table,
 * the lock guarding the object lists, the normal and bootstrapped
 * object lists, and the mutex (destroyed in nvkm_instmem_dtor()).
 */
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
          enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
    nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
    imem->func = func;
    spin_lock_init(&imem->lock);
    INIT_LIST_HEAD(&imem->list);
    INIT_LIST_HEAD(&imem->boot);
    mutex_init(&imem->mutex);
}