/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for the XDP mode that
 * uses one frame per-page, but it can fall back on the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to free a page_pool object.  Thus, API users
 * must call page_pool_put_page() to free the page, or attach
 * the page to a page_pool-aware object like skbs marked with
 * skb_mark_for_recycle().
 *
 * page_pool_put_page() will either recycle the page, or, in case of
 * refcnt > 1, release the DMA mapping and in-flight state accounting.
 */
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack.
 *
 * This is a lockless LIFO cache private to the (NAPI) context that
 * owns the pool.  When it runs empty, it is refilled in bulk, up to
 * PP_ALLOC_CACHE_REFILL pages at a time, from the ptr_ring below.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;	 /* PP_FLAG_* flags, see above */
	unsigned int	order;	 /* 2^order pages per allocation */
	unsigned int	pool_size;
	int		nid;	 /* NUMA node id to allocate pages from */
	struct device	*dev;	 /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;	 /* DMA addr offset */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};
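
/* Example: a minimal sketch of filling in page_pool_params for an Rx
 * ring that lets the pool handle DMA mapping and device-direction
 * syncing.  The pdev pointer and the ring size of 256 are hypothetical
 * driver details, not part of this API:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 */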

#ifdef CONFIG_PAGE_POOL_STATS
struct page_pool_alloc_stats {
	u64 fast;	/* fast path allocations */
	u64 slow;	/* slow-path order-0 allocations */
	u64 slow_high_order; /* slow-path high order allocations */
	u64 empty;	/* ptr ring was empty, forcing a slow path
			 * allocation
			 */
	u64 refill;	/* allocations via successful refill */
	u64 waive;	/* failed refills due to NUMA zone mismatch */
};

struct page_pool_recycle_stats {
	u64 cached;	/* recycling placed page in the cache */
	u64 cache_full;	/* cache was full */
	u64 ring;	/* recycling placed page back into the ptr ring */
	u64 ring_full;	/* page released from page_pool because the
			 * ptr ring was full
			 */
	u64 released_refcnt; /* page released (and not recycled) because
			      * of an elevated refcnt
			      */
};

/* This struct wraps the above stats structs so users of the
 * page_pool_get_stats API can pass a single argument when requesting
 * the stats structures.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};

int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);

/*
 * Drivers that wish to harvest page pool stats and report them to
 * users (perhaps via ethtool, debugfs, or another mechanism) can
 * allocate a struct page_pool_stats and call page_pool_get_stats()
 * to get stats for the specified pool.
 */
bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats);
#else

static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	return data;
}

#endif
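
/* Example: a sketch of how a driver might plug these helpers into its
 * ethtool callbacks.  The priv layout and per-ring loop are
 * hypothetical; only the page_pool_* calls are part of this API.
 * page_pool_get_stats() accumulates into *stats, so multiple pools can
 * be summed into one struct:
 *
 *	// in .get_ethtool_stats():
 *	struct page_pool_stats stats = { 0 };
 *	int i;
 *
 *	for (i = 0; i < priv->num_rx_rings; i++)
 *		page_pool_get_stats(priv->rx_ring[i].page_pool, &stats);
 *	data = page_pool_ethtool_stats_get(data, &stats);
 *
 * paired with page_pool_ethtool_stats_get_count() in .get_sset_count()
 * and page_pool_ethtool_stats_get_strings() in .get_strings().
 */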

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for allocation side
	 *
	 * Drivers allocation side usually already perform some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require driver to protect allocation side.
	 *
	 * For NIC drivers this means, allocate a page_pool per
	 * RX-queue.  The RX-queue is already protected by
	 * Softirq/BH scheduling, and napi_schedule guarantees that
	 * a single napi_struct will only be scheduled on a single
	 * CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt exists to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
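
/* Example: a sketch of Rx descriptor refill using these allocators;
 * the desc and rx_buf_len names are hypothetical driver details:
 *
 *	struct page *page;
 *	unsigned int offset;
 *
 *	page = page_pool_dev_alloc_pages(pool);
 *	if (!page)
 *		return -ENOMEM;
 *	desc->addr = page_pool_get_dma_addr(page) + pool->p.offset;
 *
 * or, with PP_FLAG_PAGE_FRAG set, several descriptors can share one
 * page:
 *
 *	page = page_pool_dev_alloc_frag(pool, &offset, rx_buf_len);
 *	if (!page)
 *		return -ENOMEM;
 *	desc->addr = page_pool_get_dma_addr(page) + offset;
 */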

/* get the stored dma direction. A driver might decide to treat this locally and
 * avoid the extra cache line from page_pool to determine the direction
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);
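
/* Example: page_pool_create() returns an ERR_PTR() on failure, so a
 * typical per-queue setup path looks like this sketch:
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	page_pool_destroy(pool);	// on teardown
 */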

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);

static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page. No need to actually overwrite it,
	 * instead we can leave this to be overwritten by the calling
	 * function.
	 *
	 * The main advantage to doing this is that an atomic_read is
	 * generally a much cheaper operation than an atomic update,
	 * especially when dealing with a page that may be partitioned
	 * into only 2 or 3 pieces.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}
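
/* Example: a sketch of manual fragment accounting, splitting one page
 * between two hypothetical consumers before handing it out:
 *
 *	page_pool_fragment_page(page, 2);
 *	// ... hand out two references to the page ...
 *
 * Each consumer later drops its reference with
 * page_pool_defrag_page(page, 1); whichever caller sees 0 returned
 * holds the last reference and is responsible for returning the page,
 * e.g. via page_pool_put_defragged_page().
 */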

static inline bool page_pool_is_last_frag(struct page_pool *pool,
					  struct page *page)
{
	/* If fragments aren't enabled or count is 0 we were the last user */
	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
	       (page_pool_defrag_page(page, 1) == 0);
}

static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(pool, page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
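
/* Example: recycling straight back into the pool from a NAPI poll
 * loop, e.g. when an XDP program returns XDP_DROP; a sketch:
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(pool, page);
 *		break;
 */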

/* On 32-bit systems with 64-bit DMA, dma_addr_t is wider than
 * unsigned long and the upper bits are stored in page->dma_addr_upper,
 * which shares storage with pp_frag_count; in that configuration
 * PP_FLAG_PAGE_FRAG cannot be used.
 */
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	/* Shift twice by 16 (not once by 32) so the expression stays
	 * valid even when this branch is compiled out because
	 * dma_addr_t is only 32 bits wide.
	 */
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}
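
/* Example: PP_FLAG_DMA_SYNC_DEV only covers the device direction;
 * before the CPU reads a received frame, the driver still owns the
 * CPU-direction sync.  A sketch, with len being the received length:
 *
 *	dma_sync_single_range_for_cpu(pool->p.dev,
 *				      page_pool_get_dma_addr(page),
 *				      pool->p.offset, len,
 *				      page_pool_get_dma_dir(pool));
 */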

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Drop a user reference; returns true when the last user is gone */
static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
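
/* Example: drivers typically call this once per NAPI poll so the pool
 * follows IRQ affinity changes; a sketch:
 *
 *	page_pool_nid_changed(pool, numa_mem_id());
 */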

static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */