#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>

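/* Free a single pool buffer.  Buffers small enough to come from a page
 * fragment are released with skb_free_frag(); larger ones were obtained
 * with kmalloc() and are released with kfree().
 */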
void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf)
{
	if (likely(bm_pool->frag_size <= PAGE_SIZE))
		skb_free_frag(buf);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(hwbm_buf_free);
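/* Refill processing for HW buffer management: allocate one buffer of
 * bm_pool->frag_size and run the pool's optional construct() callback on
 * it.  Returns 0 on success or -ENOMEM on failure.
 */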
int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
{
	int frag_size = bm_pool->frag_size;
	void *buf;

	if (likely(frag_size <= PAGE_SIZE))
		buf = netdev_alloc_frag(frag_size);
	else
		buf = kmalloc(frag_size, gfp);

	if (!buf)
		return -ENOMEM;

	if (bm_pool->construct)
		if (bm_pool->construct(bm_pool, buf)) {
			hwbm_buf_free(bm_pool, buf);
			return -ENOMEM;
		}

	return 0;
}
EXPORT_SYMBOL_GPL(hwbm_pool_refill);
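/* Try to add buf_num buffers to the pool, stopping at the first
 * allocation failure.  Returns the number of buffers actually added, or
 * the current fill level if the pool is already full.
 */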
int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
{
	int err, i;

	mutex_lock(&bm_pool->buf_lock);
	if (bm_pool->buf_num == bm_pool->size) {
		pr_warn("pool already filled\n");
		mutex_unlock(&bm_pool->buf_lock);
		return bm_pool->buf_num;
	}

	if (buf_num + bm_pool->buf_num > bm_pool->size) {
		pr_warn("cannot allocate %d buffers for pool\n",
			buf_num);
		mutex_unlock(&bm_pool->buf_lock);
		return 0;
	}

	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
			buf_num, bm_pool->buf_num);
		mutex_unlock(&bm_pool->buf_lock);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		err = hwbm_pool_refill(bm_pool, GFP_KERNEL);
		if (err < 0)
			break;
	}

	bm_pool->buf_num += i;
	pr_debug("hwbm pool: %d of %d buffers added\n", i, buf_num);
	mutex_unlock(&bm_pool->buf_lock);

	return i;
}
EXPORT_SYMBOL_GPL(hwbm_pool_add);