0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <core/mm.h>
0025
/*
 * node(root, dir) - fetch @root's neighbour on the ordered node list in
 * direction @dir ("prev" or "next"), or NULL when that neighbour would be
 * the list head (i.e. @root is the first/last real node).
 *
 * NOTE(review): this macro implicitly captures a variable named "mm" from
 * the calling function's scope — it is only usable inside functions that
 * declare a "struct nvkm_mm *mm" parameter.
 */
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
0028
0029 void
0030 nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
0031 {
0032 struct nvkm_mm_node *node;
0033
0034 pr_err("nvkm: %s\n", header);
0035 pr_err("nvkm: node list:\n");
0036 list_for_each_entry(node, &mm->nodes, nl_entry) {
0037 pr_err("nvkm: \t%08x %08x %d\n",
0038 node->offset, node->length, node->type);
0039 }
0040 pr_err("nvkm: free list:\n");
0041 list_for_each_entry(node, &mm->free, fl_entry) {
0042 pr_err("nvkm: \t%08x %08x %d\n",
0043 node->offset, node->length, node->type);
0044 }
0045 }
0046
/*
 * nvkm_mm_free() - return an allocated region to the allocator.
 * @mm: the memory manager the node was allocated from.
 * @pthis: pointer to the caller's node pointer; always NULLed on return
 *	   (safe to call with *pthis == NULL).
 *
 * The freed region is coalesced with any adjacent free (TYPE_NONE)
 * neighbours on the node list; if no merge absorbs it, the node itself
 * is marked free and inserted into the free list, which is kept sorted
 * by offset.
 */
void
nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
	struct nvkm_mm_node *this = *pthis;

	if (this) {
		struct nvkm_mm_node *prev = node(this, prev);
		struct nvkm_mm_node *next = node(this, next);

		/* Merge backwards: a free predecessor absorbs this
		 * region's length; this node is unlinked and freed, and
		 * "this" now aliases the (already free-listed) prev. */
		if (prev && prev->type == NVKM_MM_TYPE_NONE) {
			prev->length += this->length;
			list_del(&this->nl_entry);
			kfree(this); this = prev;
		}

		/* Merge forwards: a free successor grows downwards to
		 * cover this region.  If "this" is itself already free
		 * (i.e. the backward merge above ran), it must also be
		 * removed from the free list before being freed. */
		if (next && next->type == NVKM_MM_TYPE_NONE) {
			next->offset = this->offset;
			next->length += this->length;
			if (this->type == NVKM_MM_TYPE_NONE)
				list_del(&this->fl_entry);
			list_del(&this->nl_entry);
			kfree(this); this = NULL;
		}

		/* No merge consumed the node: mark it free and insert it
		 * into the free list ahead of the first entry with a
		 * higher offset (keeps the free list offset-sorted). */
		if (this && this->type != NVKM_MM_TYPE_NONE) {
			list_for_each_entry(prev, &mm->free, fl_entry) {
				if (this->offset < prev->offset)
					break;
			}

			list_add_tail(&this->fl_entry, &prev->fl_entry);
			this->type = NVKM_MM_TYPE_NONE;
		}
	}

	*pthis = NULL;
}
0084
0085 static struct nvkm_mm_node *
0086 region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
0087 {
0088 struct nvkm_mm_node *b;
0089
0090 if (a->length == size)
0091 return a;
0092
0093 b = kmalloc(sizeof(*b), GFP_KERNEL);
0094 if (unlikely(b == NULL))
0095 return NULL;
0096
0097 b->offset = a->offset;
0098 b->length = size;
0099 b->heap = a->heap;
0100 b->type = a->type;
0101 a->offset += size;
0102 a->length -= size;
0103 list_add_tail(&b->nl_entry, &a->nl_entry);
0104 if (b->type == NVKM_MM_TYPE_NONE)
0105 list_add_tail(&b->fl_entry, &a->fl_entry);
0106
0107 return b;
0108 }
0109
/*
 * nvkm_mm_head() - allocate a region from the lowest suitable offset.
 * @mm: the memory manager to allocate from.
 * @heap: heap id the region must come from, or NVKM_MM_HEAP_ANY.
 * @type: caller's allocation type tag; must not be NONE or HOLE.
 * @size_max: largest acceptable length.
 * @size_min: smallest acceptable length.
 * @align: required alignment (presumably a power of two — the mask
 *	   arithmetic below assumes it; TODO confirm at call sites).
 * @pnode: on success, receives the allocated node.
 *
 * Walks the free list front-to-back and takes the first candidate whose
 * usable, aligned span is at least @size_min.  Returns 0 on success,
 * -ENOMEM on allocation failure while splitting, -ENOSPC when no free
 * region fits.
 */
int
nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
	     u32 align, struct nvkm_mm_node **pnode)
{
	struct nvkm_mm_node *prev, *this, *next;
	u32 mask = align - 1;
	u32 splitoff;
	u32 s, e;

	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry(this, &mm->free, fl_entry) {
		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
			if (this->heap != heap)
				continue;
		}
		/* usable span [s, e) within this free region */
		e = this->offset + this->length;
		s = this->offset;

		/* when the neighbouring node has a different type, the
		 * boundary must fall on a block_size multiple */
		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type)
			e = rounddown(e, mm->block_size);

		/* apply the caller's alignment to both ends */
		s  = (s + mask) & ~mask;
		e &= ~mask;
		if (s > e || e - s < size_min)
			continue;

		/* split off any unusable head so the allocation starts
		 * exactly at s */
		splitoff = s - this->offset;
		if (splitoff && !region_head(mm, this, splitoff))
			return -ENOMEM;

		/* then split the allocation itself off the front,
		 * capped at size_max */
		this = region_head(mm, this, min(size_max, e - s));
		if (!this)
			return -ENOMEM;

		this->next = NULL;
		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}
0159
0160 static struct nvkm_mm_node *
0161 region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
0162 {
0163 struct nvkm_mm_node *b;
0164
0165 if (a->length == size)
0166 return a;
0167
0168 b = kmalloc(sizeof(*b), GFP_KERNEL);
0169 if (unlikely(b == NULL))
0170 return NULL;
0171
0172 a->length -= size;
0173 b->offset = a->offset + a->length;
0174 b->length = size;
0175 b->heap = a->heap;
0176 b->type = a->type;
0177
0178 list_add(&b->nl_entry, &a->nl_entry);
0179 if (b->type == NVKM_MM_TYPE_NONE)
0180 list_add(&b->fl_entry, &a->fl_entry);
0181
0182 return b;
0183 }
0184
/*
 * nvkm_mm_tail() - allocate a region from the highest suitable offset.
 * @mm: the memory manager to allocate from.
 * @heap: heap id the region must come from, or NVKM_MM_HEAP_ANY.
 * @type: caller's allocation type tag; must not be NONE or HOLE.
 * @size_max: largest acceptable length.
 * @size_min: smallest acceptable length.
 * @align: required alignment (presumably a power of two — the mask
 *	   arithmetic below assumes it; TODO confirm at call sites).
 * @pnode: on success, receives the allocated node.
 *
 * Mirror of nvkm_mm_head(): walks the free list back-to-front and carves
 * the allocation off the END of the first fitting region.  Returns 0 on
 * success, -ENOMEM on allocation failure while splitting, -ENOSPC when
 * no free region fits.
 */
int
nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
	     u32 align, struct nvkm_mm_node **pnode)
{
	struct nvkm_mm_node *prev, *this, *next;
	u32 mask = align - 1;

	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
		u32 e = this->offset + this->length;
		u32 s = this->offset;
		u32 c = 0, a;	/* c: tail slack to split off; a: alloc size */
		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
			if (this->heap != heap)
				continue;
		}

		/* when the neighbouring node has a different type, the
		 * boundary must fall on a block_size multiple */
		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type) {
			e = rounddown(e, mm->block_size);
			c = next->offset - e;
		}

		s = (s + mask) & ~mask;
		a = e - s;
		if (s > e || a < size_min)
			continue;

		/* allocation ends at e; align its start downwards and
		 * account any resulting gap in the tail slack c */
		a  = min(a, size_max);
		s  = (e - a) & ~mask;
		c += (e - s) - a;

		/* split off the unusable tail slack first... */
		if (c && !region_tail(mm, this, c))
			return -ENOMEM;

		/* ...then the allocation itself, from the new end */
		this = region_tail(mm, this, a);
		if (!this)
			return -ENOMEM;

		this->next = NULL;
		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}
0238
/*
 * nvkm_mm_init() - initialise a memory manager, or append another heap.
 * @mm: the memory manager.
 * @heap: heap id to tag the new free region with.
 * @offset: start of the managed range.
 * @length: length of the managed range (0 yields an empty region).
 * @block: minimum allocation granularity; on repeat calls it must match
 *	   the block size established by the first call (BUG otherwise).
 *
 * On an already-initialised @mm this appends a new heap, which must lie
 * at or beyond the end of the existing range (BUG otherwise); any gap is
 * covered with a TYPE_HOLE node so the node list stays contiguous.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
	struct nvkm_mm_node *node, *prev;
	u32 next;

	if (nvkm_mm_initialised(mm)) {
		/* growing: new heap must start at/after the current end */
		prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
		next = prev->offset + prev->length;
		if (next != offset) {
			BUG_ON(next > offset);
			if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
				return -ENOMEM;
			/* plug the gap with an unallocatable hole */
			node->type = NVKM_MM_TYPE_HOLE;
			node->offset = next;
			node->length = offset - next;
			list_add_tail(&node->nl_entry, &mm->nodes);
		}
		BUG_ON(block != mm->block_size);
	} else {
		INIT_LIST_HEAD(&mm->nodes);
		INIT_LIST_HEAD(&mm->free);
		mm->block_size = block;
		mm->heap_nodes = 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* trim the range to whole blocks; kzalloc already zeroed
	 * offset/length for the empty (length == 0) case */
	if (length) {
		node->offset = roundup(offset, mm->block_size);
		node->length = rounddown(offset + length, mm->block_size);
		node->length -= node->offset;
	}

	list_add_tail(&node->nl_entry, &mm->nodes);
	list_add_tail(&node->fl_entry, &mm->free);
	node->heap = heap;
	mm->heap_nodes++;
	return 0;
}
0281
0282 int
0283 nvkm_mm_fini(struct nvkm_mm *mm)
0284 {
0285 struct nvkm_mm_node *node, *temp;
0286 int nodes = 0;
0287
0288 if (!nvkm_mm_initialised(mm))
0289 return 0;
0290
0291 list_for_each_entry(node, &mm->nodes, nl_entry) {
0292 if (node->type != NVKM_MM_TYPE_HOLE) {
0293 if (++nodes > mm->heap_nodes) {
0294 nvkm_mm_dump(mm, "mm not clean!");
0295 return -EBUSY;
0296 }
0297 }
0298 }
0299
0300 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
0301 list_del(&node->nl_entry);
0302 kfree(node);
0303 }
0304
0305 mm->heap_nodes = 0;
0306 return 0;
0307 }