[OSCL-LXR cross-referencer navigation chrome removed. The content below appears to be the Linux kernel remote-heap allocator source (arch/powerpc/lib/rheap.c); the four-digit numbers embedded in each line are the viewer's own line numbers, not part of the code.]

0001 /*
0002  * A Remote Heap.  Remote means that we don't touch the memory that the
0003  * heap points to. Normal heap implementations use the memory they manage
0004  * to place their list. We cannot do that because the memory we manage may
0005  * have special properties, for example it is uncachable or of different
0006  * endianess.
0007  *
0008  * Author: Pantelis Antoniou <panto@intracom.gr>
0009  *
0010  * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
0011  * the terms of the GNU General Public License version 2. This program
0012  * is licensed "as is" without any warranty of any kind, whether express
0013  * or implied.
0014  */
0015 #include <linux/types.h>
0016 #include <linux/errno.h>
0017 #include <linux/kernel.h>
0018 #include <linux/export.h>
0019 #include <linux/mm.h>
0020 #include <linux/err.h>
0021 #include <linux/slab.h>
0022 
0023 #include <asm/rheap.h>
0024 
0025 /*
0026  * Fixup a list_head, needed when copying lists.  If the pointers fall
0027  * between s and e, apply the delta.  This assumes that
0028  * sizeof(struct list_head *) == sizeof(unsigned long *).
0029  */
0030 static inline void fixup(unsigned long s, unsigned long e, int d,
0031              struct list_head *l)
0032 {
0033     unsigned long *pp;
0034 
0035     pp = (unsigned long *)&l->next;
0036     if (*pp >= s && *pp < e)
0037         *pp += d;
0038 
0039     pp = (unsigned long *)&l->prev;
0040     if (*pp >= s && *pp < e)
0041         *pp += d;
0042 }
0043 
/*
 * Grow the block-descriptor array to max_blocks entries.  Returns 0 on
 * success, -EINVAL when max_blocks would not actually grow the array, or
 * -ENOMEM on allocation failure.  Every new slot is put on the empty list.
 * Any pointer previously held into the old block area becomes invalid.
 */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	/* Shrinking is not supported */
	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	/* GFP_ATOMIC: may be called from contexts that cannot sleep */
	block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		/*
		 * NOTE(review): delta is an int holding a pointer
		 * difference; on 64-bit, two allocations more than
		 * INT_MAX bytes apart would truncate — confirm this is
		 * acceptable for this allocator's usage.
		 */
		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers that still aim into the old array */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	/* the array is heap-allocated now, even if it was static before */
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the free list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}
0097 
0098 /*
0099  * Assure at least the required amount of empty slots.  If this function
0100  * causes a grow in the block area then all pointers kept to the block
0101  * area are invalid!
0102  */
0103 static int assure_empty(rh_info_t * info, int slots)
0104 {
0105     int max_blocks;
0106 
0107     /* This function is not meant to be used to grow uncontrollably */
0108     if (slots >= 4)
0109         return -EINVAL;
0110 
0111     /* Enough space */
0112     if (info->empty_slots >= slots)
0113         return 0;
0114 
0115     /* Next 16 sized block */
0116     max_blocks = ((info->max_blocks + slots) + 15) & ~15;
0117 
0118     return grow(info, max_blocks);
0119 }
0120 
0121 static rh_block_t *get_slot(rh_info_t * info)
0122 {
0123     rh_block_t *blk;
0124 
0125     /* If no more free slots, and failure to extend. */
0126     /* XXX: You should have called assure_empty before */
0127     if (info->empty_slots == 0) {
0128         printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
0129         return NULL;
0130     }
0131 
0132     /* Get empty slot to use */
0133     blk = list_entry(info->empty_list.next, rh_block_t, list);
0134     list_del_init(&blk->list);
0135     info->empty_slots--;
0136 
0137     /* Initialize */
0138     blk->start = 0;
0139     blk->size = 0;
0140     blk->owner = NULL;
0141 
0142     return blk;
0143 }
0144 
0145 static inline void release_slot(rh_info_t * info, rh_block_t * blk)
0146 {
0147     list_add(&blk->list, &info->empty_list);
0148     info->empty_slots++;
0149 }
0150 
/*
 * Insert blkn into the free list, coalescing it with an immediately
 * adjacent free block on either side when possible.  When a merge
 * happens, blkn's descriptor (and possibly the absorbed neighbour's)
 * is returned to the empty-slot pool.
 */
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		/* first block starting at or below s: remembered as the
		 * insertion anchor for the no-coalesce case */
		if (next == NULL && s >= bs)
			next = blk;

		/* blk ends exactly where we begin: merge-before candidate */
		if (be == s)
			before = blk;

		/* blk begins exactly where we end: merge-after candidate */
		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Both neighbours exist: grow the before block over the freed
	 * range plus the after block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
0231 
0232 static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
0233 {
0234     rh_block_t *blk;
0235     struct list_head *l;
0236 
0237     /* Find the block immediately before the given one (if any) */
0238     list_for_each(l, &info->taken_list) {
0239         blk = list_entry(l, rh_block_t, list);
0240         if (blk->start > blkn->start) {
0241             list_add_tail(&blkn->list, &blk->list);
0242             return;
0243         }
0244     }
0245 
0246     list_add_tail(&blkn->list, &info->taken_list);
0247 }
0248 
0249 /*
0250  * Create a remote heap dynamically.  Note that no memory for the blocks
0251  * are allocated.  It will upon the first allocation
0252  */
0253 rh_info_t *rh_create(unsigned int alignment)
0254 {
0255     rh_info_t *info;
0256 
0257     /* Alignment must be a power of two */
0258     if ((alignment & (alignment - 1)) != 0)
0259         return ERR_PTR(-EINVAL);
0260 
0261     info = kmalloc(sizeof(*info), GFP_ATOMIC);
0262     if (info == NULL)
0263         return ERR_PTR(-ENOMEM);
0264 
0265     info->alignment = alignment;
0266 
0267     /* Initially everything as empty */
0268     info->block = NULL;
0269     info->max_blocks = 0;
0270     info->empty_slots = 0;
0271     info->flags = 0;
0272 
0273     INIT_LIST_HEAD(&info->empty_list);
0274     INIT_LIST_HEAD(&info->free_list);
0275     INIT_LIST_HEAD(&info->taken_list);
0276 
0277     return info;
0278 }
0279 EXPORT_SYMBOL_GPL(rh_create);
0280 
0281 /*
0282  * Destroy a dynamically created remote heap.  Deallocate only if the areas
0283  * are not static
0284  */
0285 void rh_destroy(rh_info_t * info)
0286 {
0287     if ((info->flags & RHIF_STATIC_BLOCK) == 0)
0288         kfree(info->block);
0289 
0290     if ((info->flags & RHIF_STATIC_INFO) == 0)
0291         kfree(info);
0292 }
0293 EXPORT_SYMBOL_GPL(rh_destroy);
0294 
0295 /*
0296  * Initialize in place a remote heap info block.  This is needed to support
0297  * operation very early in the startup of the kernel, when it is not yet safe
0298  * to call kmalloc.
0299  */
0300 void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
0301          rh_block_t * block)
0302 {
0303     int i;
0304     rh_block_t *blk;
0305 
0306     /* Alignment must be a power of two */
0307     if ((alignment & (alignment - 1)) != 0)
0308         return;
0309 
0310     info->alignment = alignment;
0311 
0312     /* Initially everything as empty */
0313     info->block = block;
0314     info->max_blocks = max_blocks;
0315     info->empty_slots = max_blocks;
0316     info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
0317 
0318     INIT_LIST_HEAD(&info->empty_list);
0319     INIT_LIST_HEAD(&info->free_list);
0320     INIT_LIST_HEAD(&info->taken_list);
0321 
0322     /* Add all new blocks to the free list */
0323     for (i = 0, blk = block; i < max_blocks; i++, blk++)
0324         list_add(&blk->list, &info->empty_list);
0325 }
0326 EXPORT_SYMBOL_GPL(rh_init);
0327 
0328 /* Attach a free memory region, coalesces regions if adjacent */
0329 int rh_attach_region(rh_info_t * info, unsigned long start, int size)
0330 {
0331     rh_block_t *blk;
0332     unsigned long s, e, m;
0333     int r;
0334 
0335     /* The region must be aligned */
0336     s = start;
0337     e = s + size;
0338     m = info->alignment - 1;
0339 
0340     /* Round start up */
0341     s = (s + m) & ~m;
0342 
0343     /* Round end down */
0344     e = e & ~m;
0345 
0346     if (IS_ERR_VALUE(e) || (e < s))
0347         return -ERANGE;
0348 
0349     /* Take final values */
0350     start = s;
0351     size = e - s;
0352 
0353     /* Grow the blocks, if needed */
0354     r = assure_empty(info, 1);
0355     if (r < 0)
0356         return r;
0357 
0358     blk = get_slot(info);
0359     blk->start = start;
0360     blk->size = size;
0361     blk->owner = NULL;
0362 
0363     attach_free_block(info, blk);
0364 
0365     return 0;
0366 }
0367 EXPORT_SYMBOL_GPL(rh_attach_region);
0368 
/*
 * Detach the aligned range [start, start+size) from the free list,
 * splitting the containing free block when the range lies strictly
 * inside it.  Returns the aligned start on success, or a negative errno
 * cast to unsigned long on failure.
 */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* A split may need one extra descriptor slot */
	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit: the whole free block goes away */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* Range touches one end: blk stays in the free list with updated
	 * start and/or size.
	 * NOTE(review): the adjustment uses the caller's raw 'size', not
	 * e - s — confirm callers always pass aligned start/size. */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment keeps the original descriptor */
		blk->size = s - bs;

		/* the back free fragment gets a fresh descriptor */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
0437 
/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Round the request up to the heap's configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	/* Worst case needs two fresh descriptors: one for a front pad
	 * fragment, one for the allocation itself */
	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	/* First fit: block must hold 'size' bytes starting at the first
	 * 'alignment'-aligned address inside it */
	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits (start == blk->start here, or the bound above fails) */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
0512 
0513 /* Allocate a block of memory at the default alignment.  The value returned is
0514  * an offset into the buffer initialized by rh_init(), or a negative number if
0515  * there is an error.
0516  */
0517 unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
0518 {
0519     return rh_alloc_align(info, size, info->alignment, owner);
0520 }
0521 EXPORT_SYMBOL_GPL(rh_alloc);
0522 
/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 * NOTE(review): start rounds UP and end rounds DOWN, so the range actually
 * taken can be smaller than and offset from the caller's request — confirm
 * callers pass aligned values.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Worst case the split needs two fresh descriptors */
	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;

	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment keeps the original descriptor */
		blk->size = s - bs;

		/* The back free fragment gets a fresh descriptor */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	/* Descriptor for the taken range itself */
	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
0607 
0608 /* Deallocate the memory previously allocated by one of the rh_alloc functions.
0609  * The return value is the size of the deallocated block, or a negative number
0610  * if there is an error.
0611  */
0612 int rh_free(rh_info_t * info, unsigned long start)
0613 {
0614     rh_block_t *blk, *blk2;
0615     struct list_head *l;
0616     int size;
0617 
0618     /* Linear search for block */
0619     blk = NULL;
0620     list_for_each(l, &info->taken_list) {
0621         blk2 = list_entry(l, rh_block_t, list);
0622         if (start < blk2->start)
0623             break;
0624         blk = blk2;
0625     }
0626 
0627     if (blk == NULL || start > (blk->start + blk->size))
0628         return -EINVAL;
0629 
0630     /* Remove from taken list */
0631     list_del(&blk->list);
0632 
0633     /* Get size of freed block */
0634     size = blk->size;
0635     attach_free_block(info, blk);
0636 
0637     return size;
0638 }
0639 EXPORT_SYMBOL_GPL(rh_free);
0640 
0641 int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
0642 {
0643     rh_block_t *blk;
0644     struct list_head *l;
0645     struct list_head *h;
0646     int nr;
0647 
0648     switch (what) {
0649 
0650     case RHGS_FREE:
0651         h = &info->free_list;
0652         break;
0653 
0654     case RHGS_TAKEN:
0655         h = &info->taken_list;
0656         break;
0657 
0658     default:
0659         return -EINVAL;
0660     }
0661 
0662     /* Linear search for block */
0663     nr = 0;
0664     list_for_each(l, h) {
0665         blk = list_entry(l, rh_block_t, list);
0666         if (stats != NULL && nr < max_stats) {
0667             stats->start = blk->start;
0668             stats->size = blk->size;
0669             stats->owner = blk->owner;
0670             stats++;
0671         }
0672         nr++;
0673     }
0674 
0675     return nr;
0676 }
0677 EXPORT_SYMBOL_GPL(rh_get_stats);
0678 
0679 int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
0680 {
0681     rh_block_t *blk, *blk2;
0682     struct list_head *l;
0683     int size;
0684 
0685     /* Linear search for block */
0686     blk = NULL;
0687     list_for_each(l, &info->taken_list) {
0688         blk2 = list_entry(l, rh_block_t, list);
0689         if (start < blk2->start)
0690             break;
0691         blk = blk2;
0692     }
0693 
0694     if (blk == NULL || start > (blk->start + blk->size))
0695         return -EINVAL;
0696 
0697     blk->owner = owner;
0698     size = blk->size;
0699 
0700     return size;
0701 }
0702 EXPORT_SYMBOL_GPL(rh_set_owner);
0703 
0704 void rh_dump(rh_info_t * info)
0705 {
0706     static rh_stats_t st[32];   /* XXX maximum 32 blocks */
0707     int maxnr;
0708     int i, nr;
0709 
0710     maxnr = ARRAY_SIZE(st);
0711 
0712     printk(KERN_INFO
0713            "info @0x%p (%d slots empty / %d max)\n",
0714            info, info->empty_slots, info->max_blocks);
0715 
0716     printk(KERN_INFO "  Free:\n");
0717     nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
0718     if (nr > maxnr)
0719         nr = maxnr;
0720     for (i = 0; i < nr; i++)
0721         printk(KERN_INFO
0722                "    0x%lx-0x%lx (%u)\n",
0723                st[i].start, st[i].start + st[i].size,
0724                st[i].size);
0725     printk(KERN_INFO "\n");
0726 
0727     printk(KERN_INFO "  Taken:\n");
0728     nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
0729     if (nr > maxnr)
0730         nr = maxnr;
0731     for (i = 0; i < nr; i++)
0732         printk(KERN_INFO
0733                "    0x%lx-0x%lx (%u) %s\n",
0734                st[i].start, st[i].start + st[i].size,
0735                st[i].size, st[i].owner != NULL ? st[i].owner : "");
0736     printk(KERN_INFO "\n");
0737 }
0738 EXPORT_SYMBOL_GPL(rh_dump);
0739 
/* Print a single block descriptor (address, range, size) to the kernel
 * log.  The info argument is currently unused. */
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);
0747