// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
                           struct drm_buddy_block *parent,
                           unsigned int order,
                           u64 offset)
{
    struct drm_buddy_block *block;

    BUG_ON(order > DRM_BUDDY_MAX_ORDER);

    block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
    if (!block)
        return NULL;

    block->header = offset;
    block->header |= order;
    block->parent = parent;

    BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
    return block;
}

static void drm_block_free(struct drm_buddy *mm,
               struct drm_buddy_block *block)
{
    kmem_cache_free(slab_blocks, block);
}

static void mark_allocated(struct drm_buddy_block *block)
{
    block->header &= ~DRM_BUDDY_HEADER_STATE;
    block->header |= DRM_BUDDY_ALLOCATED;

    list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
              struct drm_buddy_block *block)
{
    block->header &= ~DRM_BUDDY_HEADER_STATE;
    block->header |= DRM_BUDDY_FREE;

    list_add(&block->link,
         &mm->free_list[drm_buddy_block_order(block)]);
}

static void mark_split(struct drm_buddy_block *block)
{
    block->header &= ~DRM_BUDDY_HEADER_STATE;
    block->header |= DRM_BUDDY_SPLIT;

    list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
    unsigned int i;
    u64 offset;

    if (size < chunk_size)
        return -EINVAL;

    if (chunk_size < PAGE_SIZE)
        return -EINVAL;

    if (!is_power_of_2(chunk_size))
        return -EINVAL;

    size = round_down(size, chunk_size);

    mm->size = size;
    mm->avail = size;
    mm->chunk_size = chunk_size;
    mm->max_order = ilog2(size) - ilog2(chunk_size);

    BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

    mm->free_list = kmalloc_array(mm->max_order + 1,
                      sizeof(struct list_head),
                      GFP_KERNEL);
    if (!mm->free_list)
        return -ENOMEM;

    for (i = 0; i <= mm->max_order; ++i)
        INIT_LIST_HEAD(&mm->free_list[i]);

    mm->n_roots = hweight64(size);

    mm->roots = kmalloc_array(mm->n_roots,
                  sizeof(struct drm_buddy_block *),
                  GFP_KERNEL);
    if (!mm->roots)
        goto out_free_list;

    offset = 0;
    i = 0;

    /*
     * Split into power-of-two blocks, in case we are given a size that is
     * not itself a power-of-two.
     */
    do {
        struct drm_buddy_block *root;
        unsigned int order;
        u64 root_size;

        root_size = rounddown_pow_of_two(size);
        order = ilog2(root_size) - ilog2(chunk_size);

        root = drm_block_alloc(mm, NULL, order, offset);
        if (!root)
            goto out_free_roots;

        mark_free(mm, root);

        BUG_ON(i > mm->max_order);
        BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

        mm->roots[i] = root;

        offset += root_size;
        size -= root_size;
        i++;
    } while (size);

    return 0;

out_free_roots:
    while (i--)
        drm_block_free(mm, mm->roots[i]);
    kfree(mm->roots);
out_free_list:
    kfree(mm->free_list);
    return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
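
/*
 * Example (illustrative sketch): a driver managing a 1 GiB region with a
 * 4 KiB minimum chunk size might set up the allocator like this, pairing it
 * with drm_buddy_fini() on teardown. The variable name and sizes below are
 * hypothetical.
 *
 *     struct drm_buddy vram_mm;
 *     int err;
 *
 *     err = drm_buddy_init(&vram_mm, SZ_1G, SZ_4K);
 *     if (err)
 *         return err;
 *     ...
 *     drm_buddy_fini(&vram_mm);
 */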

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Clean up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
    int i;

    for (i = 0; i < mm->n_roots; ++i) {
        WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
        drm_block_free(mm, mm->roots[i]);
    }

    WARN_ON(mm->avail != mm->size);

    kfree(mm->roots);
    kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
               struct drm_buddy_block *block)
{
    unsigned int block_order = drm_buddy_block_order(block) - 1;
    u64 offset = drm_buddy_block_offset(block);

    BUG_ON(!drm_buddy_block_is_free(block));
    BUG_ON(!drm_buddy_block_order(block));

    block->left = drm_block_alloc(mm, block, block_order, offset);
    if (!block->left)
        return -ENOMEM;

    block->right = drm_block_alloc(mm, block, block_order,
                       offset + (mm->chunk_size << block_order));
    if (!block->right) {
        drm_block_free(mm, block->left);
        return -ENOMEM;
    }

    mark_free(mm, block->left);
    mark_free(mm, block->right);

    mark_split(block);

    return 0;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
    struct drm_buddy_block *parent;

    parent = block->parent;
    if (!parent)
        return NULL;

    if (parent->left == block)
        return parent->right;

    return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
    return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
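
/*
 * Example (illustrative sketch): with the driver's own allocator lock held
 * (the mutex and variable names here are hypothetical), a caller can check
 * whether the sibling of an allocated block is still free and could merge
 * once the block is returned:
 *
 *     struct drm_buddy_block *buddy;
 *
 *     mutex_lock(&vram_lock);
 *     buddy = drm_get_buddy(block);
 *     if (buddy && drm_buddy_block_is_free(buddy))
 *         merge_possible = true;
 *     mutex_unlock(&vram_lock);
 */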

static void __drm_buddy_free(struct drm_buddy *mm,
                 struct drm_buddy_block *block)
{
    struct drm_buddy_block *parent;

    while ((parent = block->parent)) {
        struct drm_buddy_block *buddy;

        buddy = __get_buddy(block);

        if (!drm_buddy_block_is_free(buddy))
            break;

        list_del(&buddy->link);

        drm_block_free(mm, block);
        drm_block_free(mm, buddy);

        block = parent;
    }

    mark_free(mm, block);
}

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
              struct drm_buddy_block *block)
{
    BUG_ON(!drm_buddy_block_is_allocated(block));
    mm->avail += drm_buddy_block_size(mm, block);
    __drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
    struct drm_buddy_block *block, *on;

    list_for_each_entry_safe(block, on, objects, link) {
        drm_buddy_free_block(mm, block);
        cond_resched();
    }
    INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
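
/*
 * Example (illustrative sketch): blocks handed out by drm_buddy_alloc_blocks()
 * usually live on a driver-owned list and are released in one call once the
 * backing object goes away; a single block can also be returned directly.
 * The manager and list names below are hypothetical.
 *
 *     drm_buddy_free_list(&vram_mm, &obj_blocks);   // whole list at once
 *
 *     drm_buddy_free_block(&vram_mm, block);        // or one block at a time
 */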

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
    return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
    return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
         u64 start, u64 end,
         unsigned int order)
{
    struct drm_buddy_block *block;
    struct drm_buddy_block *buddy;
    LIST_HEAD(dfs);
    int err;
    int i;

    end = end - 1;

    for (i = 0; i < mm->n_roots; ++i)
        list_add_tail(&mm->roots[i]->tmp_link, &dfs);

    do {
        u64 block_start;
        u64 block_end;

        block = list_first_entry_or_null(&dfs,
                         struct drm_buddy_block,
                         tmp_link);
        if (!block)
            break;

        list_del(&block->tmp_link);

        if (drm_buddy_block_order(block) < order)
            continue;

        block_start = drm_buddy_block_offset(block);
        block_end = block_start + drm_buddy_block_size(mm, block) - 1;

        if (!overlaps(start, end, block_start, block_end))
            continue;

        if (drm_buddy_block_is_allocated(block))
            continue;

        if (contains(start, end, block_start, block_end) &&
            order == drm_buddy_block_order(block)) {
            /*
             * Find the free block within the range.
             */
            if (drm_buddy_block_is_free(block))
                return block;

            continue;
        }

        if (!drm_buddy_block_is_split(block)) {
            err = split_block(mm, block);
            if (unlikely(err))
                goto err_undo;
        }

        list_add(&block->right->tmp_link, &dfs);
        list_add(&block->left->tmp_link, &dfs);
    } while (1);

    return ERR_PTR(-ENOSPC);

err_undo:
    /*
     * We really don't want to leave around a bunch of split blocks, since
     * bigger is better, so make sure we merge everything back before we
     * free the allocated blocks.
     */
    buddy = __get_buddy(block);
    if (buddy &&
        (drm_buddy_block_is_free(block) &&
         drm_buddy_block_is_free(buddy)))
        __drm_buddy_free(mm, block);
    return ERR_PTR(err);
}

static struct drm_buddy_block *
get_maxblock(struct list_head *head)
{
    struct drm_buddy_block *max_block = NULL, *node;

    max_block = list_first_entry_or_null(head,
                         struct drm_buddy_block,
                         link);
    if (!max_block)
        return NULL;

    list_for_each_entry(node, head, link) {
        if (drm_buddy_block_offset(node) >
            drm_buddy_block_offset(max_block))
            max_block = node;
    }

    return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
            unsigned int order,
            unsigned long flags)
{
    struct drm_buddy_block *block = NULL;
    unsigned int i;
    int err;

    for (i = order; i <= mm->max_order; ++i) {
        if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
            block = get_maxblock(&mm->free_list[i]);
            if (block)
                break;
        } else {
            block = list_first_entry_or_null(&mm->free_list[i],
                             struct drm_buddy_block,
                             link);
            if (block)
                break;
        }
    }

    if (!block)
        return ERR_PTR(-ENOSPC);

    BUG_ON(!drm_buddy_block_is_free(block));

    while (i != order) {
        err = split_block(mm, block);
        if (unlikely(err))
            goto err_undo;

        block = block->right;
        i--;
    }
    return block;

err_undo:
    if (i != order)
        __drm_buddy_free(mm, block);
    return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
             struct list_head *dfs,
             u64 start, u64 size,
             struct list_head *blocks)
{
    struct drm_buddy_block *block;
    struct drm_buddy_block *buddy;
    LIST_HEAD(allocated);
    u64 end;
    int err;

    end = start + size - 1;

    do {
        u64 block_start;
        u64 block_end;

        block = list_first_entry_or_null(dfs,
                         struct drm_buddy_block,
                         tmp_link);
        if (!block)
            break;

        list_del(&block->tmp_link);

        block_start = drm_buddy_block_offset(block);
        block_end = block_start + drm_buddy_block_size(mm, block) - 1;

        if (!overlaps(start, end, block_start, block_end))
            continue;

        if (drm_buddy_block_is_allocated(block)) {
            err = -ENOSPC;
            goto err_free;
        }

        if (contains(start, end, block_start, block_end)) {
            if (!drm_buddy_block_is_free(block)) {
                err = -ENOSPC;
                goto err_free;
            }

            mark_allocated(block);
            mm->avail -= drm_buddy_block_size(mm, block);
            list_add_tail(&block->link, &allocated);
            continue;
        }

        if (!drm_buddy_block_is_split(block)) {
            err = split_block(mm, block);
            if (unlikely(err))
                goto err_undo;
        }

        list_add(&block->right->tmp_link, dfs);
        list_add(&block->left->tmp_link, dfs);
    } while (1);

    list_splice_tail(&allocated, blocks);
    return 0;

err_undo:
    /*
     * We really don't want to leave around a bunch of split blocks, since
     * bigger is better, so make sure we merge everything back before we
     * free the allocated blocks.
     */
    buddy = __get_buddy(block);
    if (buddy &&
        (drm_buddy_block_is_free(block) &&
         drm_buddy_block_is_free(buddy)))
        __drm_buddy_free(mm, block);

err_free:
    drm_buddy_free_list(mm, &allocated);
    return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
                   u64 start,
                   u64 size,
                   struct list_head *blocks)
{
    LIST_HEAD(dfs);
    int i;

    for (i = 0; i < mm->n_roots; ++i)
        list_add_tail(&mm->roots[i]->tmp_link, &dfs);

    return __alloc_range(mm, &dfs, start, size, blocks);
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the remaining
 * portions are unused and can optionally be freed with this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
             u64 new_size,
             struct list_head *blocks)
{
    struct drm_buddy_block *parent;
    struct drm_buddy_block *block;
    LIST_HEAD(dfs);
    u64 new_start;
    int err;

    if (!list_is_singular(blocks))
        return -EINVAL;

    block = list_first_entry(blocks,
                 struct drm_buddy_block,
                 link);

    if (WARN_ON(!drm_buddy_block_is_allocated(block)))
        return -EINVAL;

    if (new_size > drm_buddy_block_size(mm, block))
        return -EINVAL;

    if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
        return -EINVAL;

    if (new_size == drm_buddy_block_size(mm, block))
        return 0;

    list_del(&block->link);
    mark_free(mm, block);
    mm->avail += drm_buddy_block_size(mm, block);

    /* Prevent recursively freeing this node */
    parent = block->parent;
    block->parent = NULL;

    new_start = drm_buddy_block_offset(block);
    list_add(&block->tmp_link, &dfs);
    err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
    if (err) {
        mark_allocated(block);
        mm->avail -= drm_buddy_block_size(mm, block);
        list_add(&block->link, blocks);
    }

    block->parent = parent;
    return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
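
/*
 * Example (illustrative sketch): a driver that asked for 768 KiB but received
 * a single 1 MiB block from a contiguous allocation can hand the unused tail
 * back to the allocator. The manager and list names are hypothetical.
 *
 *     err = drm_buddy_block_trim(&vram_mm, 768 * SZ_1K, &obj_blocks);
 *     if (err)
 *         return err;
 */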

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when range limitations are given; it traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
               u64 start, u64 end, u64 size,
               u64 min_page_size,
               struct list_head *blocks,
               unsigned long flags)
{
    struct drm_buddy_block *block = NULL;
    unsigned int min_order, order;
    unsigned long pages;
    LIST_HEAD(allocated);
    int err;

    if (size < mm->chunk_size)
        return -EINVAL;

    if (min_page_size < mm->chunk_size)
        return -EINVAL;

    if (!is_power_of_2(min_page_size))
        return -EINVAL;

    if (!IS_ALIGNED(start | end | size, mm->chunk_size))
        return -EINVAL;

    if (end > mm->size)
        return -EINVAL;

    if (range_overflows(start, size, mm->size))
        return -EINVAL;

    /* Actual range allocation */
    if (start + size == end)
        return __drm_buddy_alloc_range(mm, start, size, blocks);

    if (!IS_ALIGNED(size, min_page_size))
        return -EINVAL;

    pages = size >> ilog2(mm->chunk_size);
    order = fls(pages) - 1;
    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

    do {
        order = min(order, (unsigned int)fls(pages) - 1);
        BUG_ON(order > mm->max_order);
        BUG_ON(order < min_order);

        do {
            if (flags & DRM_BUDDY_RANGE_ALLOCATION)
                /* Allocate traversing within the range */
                block = alloc_range_bias(mm, start, end, order);
            else
                /* Allocate from freelist */
                block = alloc_from_freelist(mm, order, flags);

            if (!IS_ERR(block))
                break;

            if (order-- == min_order) {
                err = -ENOSPC;
                goto err_free;
            }
        } while (1);

        mark_allocated(block);
        mm->avail -= drm_buddy_block_size(mm, block);
        kmemleak_update_trace(block);
        list_add_tail(&block->link, &allocated);

        pages -= BIT(order);

        if (!pages)
            break;
    } while (1);

    list_splice_tail(&allocated, blocks);
    return 0;

err_free:
    drm_buddy_free_list(mm, &allocated);
    return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
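
/*
 * Example (illustrative sketch): allocating 8 MiB anywhere in the managed
 * range with a 64 KiB minimum page size, then releasing the blocks again.
 * The manager and list names are hypothetical.
 *
 *     LIST_HEAD(obj_blocks);
 *
 *     err = drm_buddy_alloc_blocks(&vram_mm, 0, vram_mm.size, SZ_8M,
 *                                  SZ_64K, &obj_blocks, 0);
 *     if (err)
 *         return err;
 *     ...
 *     drm_buddy_free_list(&vram_mm, &obj_blocks);
 */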

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
               struct drm_buddy_block *block,
               struct drm_printer *p)
{
    u64 start = drm_buddy_block_offset(block);
    u64 size = drm_buddy_block_size(mm, block);

    drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
    int order;

    drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
           mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

    for (order = mm->max_order; order >= 0; order--) {
        struct drm_buddy_block *block;
        u64 count = 0, free;

        list_for_each_entry(block, &mm->free_list[order], link) {
            BUG_ON(!drm_buddy_block_is_free(block));
            count++;
        }

        drm_printf(p, "order-%d ", order);

        free = count * (mm->chunk_size << order);
        if (free < SZ_1M)
            drm_printf(p, "free: %lluKiB", free >> 10);
        else
            drm_printf(p, "free: %lluMiB", free >> 20);

        drm_printf(p, ", pages: %llu\n", count);
    }
}
EXPORT_SYMBOL(drm_buddy_print);
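
/*
 * Example (illustrative sketch): dumping allocator state from a debugfs show
 * callback through a seq_file-backed printer; individual blocks can be dumped
 * with drm_buddy_block_print() in the same way. The seq_file pointer and
 * manager name are assumptions.
 *
 *     struct drm_printer p = drm_seq_file_printer(m);
 *
 *     drm_buddy_print(&vram_mm, &p);
 */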

static void drm_buddy_module_exit(void)
{
    kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
    slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
    if (!slab_blocks)
        return -ENOMEM;

    return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");