Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: MIT */
0002 #ifndef __NVKM_MMU_H__
0003 #define __NVKM_MMU_H__
0004 #include <core/subdev.h>
0005 
/* A region of virtual address space within an nvkm_vmm, allocated via
 * nvkm_vmm_get() and released via nvkm_vmm_put().  Bitfields keep the
 * structure compact; 'size' at 50 bits covers the full GPU VA range.
 */
struct nvkm_vma {
    struct list_head head; /* Linkage in a region list — presumably nvkm_vmm.list; confirm. */
    struct rb_node tree;   /* Linkage in an rb-tree — presumably nvkm_vmm.free/root; confirm. */
    u64 addr;              /* Start address of the region. */
    u64 size:50;           /* Size of the region in bytes. */
    bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
    bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
    u8   page:3; /* Requested page type (index, or NONE for automatic). */
    u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
    bool used:1; /* Region allocated. */
    bool part:1; /* Region was split from an allocated region by map(). */
    bool busy:1; /* Region busy (for temporarily preventing user access). */
    bool mapped:1; /* Region contains valid pages. */
    struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
    struct nvkm_tags *tags; /* Compression tag reference. */
};
0023 
/* A GPU virtual address space instance.  Created by nvkm_vmm_new(),
 * reference-counted via nvkm_vmm_ref()/nvkm_vmm_unref().
 */
struct nvkm_vmm {
    const struct nvkm_vmm_func *func; /* Implementation-specific operations. */
    struct nvkm_mmu *mmu;             /* MMU subdev this address space belongs to. */
    const char *name;                 /* Name for debug/logging (see nvkm_vmm_new()). */
    u32 debug;                        /* Debug verbosity level. */
    struct kref kref;                 /* Refcount backing nvkm_vmm_ref()/unref(). */
    struct mutex mutex;

    u64 start; /* Base of the managed VA range (nvkm_vmm_new() addr) — presumably; confirm. */
    u64 limit; /* End of the managed VA range. */

    struct nvkm_vmm_pt *pd;  /* Top-level page table (page directory). */
    struct list_head join;   /* NOTE(review): presumably instances attached via nvkm_vmm_join() — confirm. */

    /* Region (nvkm_vma) tracking; names suggest 'free' holds unallocated
     * space and 'root' holds allocated regions — TODO confirm against vmm.c. */
    struct list_head list;
    struct rb_root free;
    struct rb_root root;

    bool bootstrapped;       /* Presumably set by nvkm_vmm_boot() — confirm. */
    atomic_t engref[NVKM_SUBDEV_NR]; /* Per-subdev reference counts. */

    /* NOTE(review): looks like a scratch "null" page (DMA address + CPU
     * mapping) used for sparse/unmapped PTEs — confirm. */
    dma_addr_t null;
    void *nullp;

    bool replay;
};
0050 
/* Address-space creation and lifetime.  nvkm_vmm_new() creates a VMM
 * covering [addr, addr + size); argv/argc carry implementation-specific
 * creation arguments.  Callers drop references with nvkm_vmm_unref(),
 * which NULLs the caller's pointer. */
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
         struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
/* Attach/detach an instance block to/from this address space — presumably
 * a channel instance; confirm at call sites. */
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
/* Allocate/free a VA region; 'page' is the requested page-type index
 * (or NVKM_VMA_PAGE_NONE for automatic selection). */
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
0060 
/* Arguments/state for a map operation (see nvkm_vmm_map()). */
struct nvkm_vmm_map {
    struct nvkm_memory *memory; /* Backing memory being mapped. */
    u64 offset;                 /* Presumably the VA offset of the mapping — confirm. */

    /* Backing-store representation.  NOTE(review): presumably exactly one
     * of mem/sgl/dma/pfn describes the pages for a given map — confirm. */
    struct nvkm_mm_node *mem;   /* VRAM allocation nodes. */
    struct scatterlist *sgl;    /* Host pages as a scatter-gather list. */
    dma_addr_t *dma;            /* Host pages as an array of DMA addresses. */
    u64 *pfn;                   /* Host pages as an array of PFN-encoded entries. */
    u64 off;                    /* Current offset into the backing store. */

    const struct nvkm_vmm_page *page; /* Page size/type selected for this mapping. */

    struct nvkm_tags *tags; /* Compression tag allocation, if any. */
    /* PTE-construction state — semantics are implementation-specific;
     * NOTE(review): confirm against the per-generation vmm code. */
    u64 next;
    u64 type;
    u64 ctag;
};
0078 
/* Map backing memory into a VA region / tear the mapping down again;
 * argv/argc carry implementation-specific mapping arguments. */
int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
         struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);

/* Look up client-owned objects by handle. */
struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
0085 
/* The MMU subdevice: advertises the device's memory heaps and memory
 * types (as NVKM_MEM_* flag combinations), and owns page-table caches
 * plus an optional device-wide VMM.
 */
struct nvkm_mmu {
    const struct nvkm_mmu_func *func; /* Per-generation operations. */
    struct nvkm_subdev subdev;

    u8  dma_bits; /* Width of DMA addresses, in bits. */

    int heap_nr; /* Number of valid entries in heap[]. */
    struct {
/* Heap/type location flags. */
#define NVKM_MEM_VRAM                                                      0x01
#define NVKM_MEM_HOST                                                      0x02
#define NVKM_MEM_COMP                                                      0x04
#define NVKM_MEM_DISP                                                      0x08
        u8  type; /* NVKM_MEM_* location flags for this heap. */
        u64 size; /* Total size of this heap in bytes. */
    } heap[4];

    int type_nr; /* Number of valid entries in type[]. */
    struct {
/* Additional memory-type attribute flags. */
#define NVKM_MEM_KIND                                                      0x10
#define NVKM_MEM_MAPPABLE                                                  0x20
#define NVKM_MEM_COHERENT                                                  0x40
#define NVKM_MEM_UNCACHED                                                  0x80
        u8 type; /* NVKM_MEM_* flags (location + attributes). */
        u8 heap; /* Index into heap[] this type allocates from. */
    } type[16];

    struct nvkm_vmm *vmm; /* Presumably the device-wide ("bar"/kernel) VMM — confirm. */

    /* Page-table allocation caches — NOTE(review): presumably ptc = cache,
     * ptp = sub-page pool; confirm against memnv*.c/pt code. */
    struct {
        struct mutex mutex;
        struct list_head list;
    } ptc, ptp;

    struct mutex mutex; /* serialises mmu invalidations */

    struct nvkm_device_oclass user; /* Object class exposing the MMU to userspace clients. */
};
0123 
/* Per-generation MMU constructors, one per supported GPU family
 * (NV04 through Turing). */
int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
0139 #endif