/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate the page_pool object.  Thus, API
 * users must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool, or to call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt, it
 * will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
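/* Illustrative sketch, not part of this header: a minimal driver RX setup
 * following the rules above.  Names such as mydrv_rx_ring and mydrv_rx_open()
 * are hypothetical, and error handling is trimmed to the bare minimum.
 *
 *	struct mydrv_rx_ring {
 *		struct page_pool *pool;
 *	};
 *
 *	static int mydrv_rx_open(struct mydrv_rx_ring *ring, struct device *dev)
 *	{
 *		struct page_pool_params pp_params = {
 *			.order		= 0,
 *			.pool_size	= 256,
 *			.nid		= NUMA_NO_NODE,
 *			.dev		= dev,
 *			.dma_dir	= DMA_FROM_DEVICE,
 *			.flags		= PP_FLAG_DMA_MAP,
 *		};
 *
 *		ring->pool = page_pool_create(&pp_params);
 *		return PTR_ERR_OR_ZERO(ring->pool);
 *	}
 *
 * In the NAPI poll loop the driver then calls
 * page_pool_dev_alloc_pages(ring->pool) instead of dev_alloc_pages(), and
 * hands back dropped frames with page_pool_recycle_direct(), so that each
 * page is returned to the pool exactly once.
 */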
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case.
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, as it shares the same softirq/NAPI protection.  If the
 * cache is already full (or partly full) then the XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};
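/* Illustrative sketch, not part of this header: filling in the params for a
 * pool that also pre-maps and syncs buffers for the device.  MYDRV_RX_HEADROOM
 * and mydrv_init_rx_page() are hypothetical driver-side names.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= dev_to_node(dev),
 *		.dev		= dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE - MYDRV_RX_HEADROOM,
 *		.offset		= MYDRV_RX_HEADROOM,
 *		.init_callback	= mydrv_init_rx_page,
 *		.init_arg	= priv,
 *	};
 *
 * With PP_FLAG_DMA_SYNC_DEV set, pages handed out by the pool are
 * DMA-synced-for-device for at most max_len bytes starting at offset;
 * syncing for the CPU remains the driver's job, as noted above.
 */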

#ifdef CONFIG_PAGE_POOL_STATS
struct page_pool_alloc_stats {
	u64 fast; /* fast path allocations */
	u64 slow; /* slow-path order-0 allocations */
	u64 slow_high_order; /* slow-path high order allocations */
	u64 empty; /* failed refills due to empty ptr ring, forcing
		    * slow path allocation
		    */
	u64 refill; /* allocations via successful refill */
	u64 waive;  /* failed refills due to numa zone mismatch */
};

struct page_pool_recycle_stats {
	u64 cached;	/* recycling placed page in the cache. */
	u64 cache_full;	/* cache was full */
	u64 ring;	/* recycling placed page back into ptr ring */
	u64 ring_full;	/* page was released from page-pool because
			 * PTR ring was full
			 */
	u64 released_refcnt; /* page released because of elevated
			      * refcnt
			      */
};

/* This struct wraps the above stats structs so users of the
 * page_pool_get_stats API can pass a single argument when requesting the
 * stats for the page pool.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};

int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);

/*
 * Drivers that wish to harvest page pool stats and report them to users
 * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
 * struct page_pool_stats and call page_pool_get_stats() to get stats
 * for the specified pool.
 */
bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats);
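/* Illustrative sketch, not part of this header: a driver summing stats over
 * all of its RX queues before reporting them, e.g. from an ethtool hook.
 * priv, num_rx_queues and rxq[] are hypothetical driver-side names.
 *
 *	struct page_pool_stats stats = { };
 *	int i;
 *
 *	for (i = 0; i < priv->num_rx_queues; i++)
 *		page_pool_get_stats(priv->rxq[i].pool, &stats);
 *
 * page_pool_get_stats() adds the pool's counters into the struct it is
 * given, so a single zero-initialized struct can aggregate several pools.
 */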
#else

static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	return data;
}

#endif

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for allocation side
	 *
	 * Drivers' allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  The NAPI schedule
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  The sole
	 * purpose of this refcnt is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
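/* Illustrative sketch, not part of this header: carving fixed-size RX
 * fragments out of pooled pages.  The pool is assumed to have been created
 * with PP_FLAG_PAGE_FRAG set; MYDRV_FRAG_SIZE is a hypothetical driver-side
 * constant.
 *
 *	unsigned int offset;
 *	struct page *page;
 *	void *buf;
 *
 *	page = page_pool_dev_alloc_frag(ring->pool, &offset, MYDRV_FRAG_SIZE);
 *	if (!page)
 *		return -ENOMEM;
 *	buf = page_address(page) + offset;
 *
 * The same page may back several fragments; the pool tracks the users via
 * page->pp_frag_count and only recycles the page once the last fragment is
 * returned with page_pool_put_page() (see page_pool_is_last_frag() below).
 */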

/* Get the stored DMA direction.  A driver might decide to store this locally
 * and avoid the extra cache line from page_pool when determining the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
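/* Illustrative sketch, not part of this header: caching the direction in a
 * hypothetical per-ring structure at setup time, so the hot RX path does not
 * have to dereference the pool for it.
 *
 *	ring->dma_dir = page_pool_get_dma_dir(ring->pool);
 *	...
 *	dma_sync_single_range_for_cpu(dev, dma_addr, offset, len, ring->dma_dir);
 */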

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);

static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page.  No need to actually overwrite it;
	 * instead we can leave this to be overwritten by the calling
	 * function.
	 *
	 * The main advantage of doing this is that an atomic_read is
	 * generally a much cheaper operation than an atomic update,
	 * especially when dealing with a page that may be partitioned
	 * into only 2 or 3 pieces.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}

static inline bool page_pool_is_last_frag(struct page_pool *pool,
					  struct page *page)
{
	/* If fragments aren't enabled or count is 0 we were the last user */
	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
	       (page_pool_defrag_page(page, 1) == 0);
}

static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but this shields the
	 * linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(pool, page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}
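/* Illustrative walk-through, not part of this header: on a PP_FLAG_PAGE_FRAG
 * pool, suppose a page currently has page->pp_frag_count == 3 (three
 * fragment users outstanding).  Then:
 *
 *	page_pool_put_page(pool, page, -1, false);  // defrag 3 -> 2, page kept
 *	page_pool_put_page(pool, page, -1, false);  // defrag 2 -> 1, page kept
 *	page_pool_put_page(pool, page, -1, false);  // last user
 *
 * Only the final call reaches page_pool_put_defragged_page(), which then
 * recycles or releases the page; the earlier calls just drop one fragment
 * reference each.
 */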

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}
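/* Note on the helpers above (editorial, not part of this header): when
 * dma_addr_t is wider than unsigned long (e.g. a 32-bit kernel with 64-bit
 * DMA addressing), the DMA address no longer fits in the single
 * page->dma_addr word, so the upper 32 bits are stored in
 * page->dma_addr_upper, which shares storage with pp_frag_count; that is
 * the case PAGE_POOL_DMA_USE_PP_FRAG_COUNT names.  The shift is written as
 * "<< 16 << 16" rather than "<< 32" so the dead branch does not trigger a
 * shift-count warning on builds where dma_addr_t is only 32 bits wide.
 */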

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
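/* Illustrative sketch, not part of this header: a driver keeping the pool's
 * preferred NUMA node in step with wherever its NAPI poll currently runs.
 * Calling this from the poll routine itself satisfies the safe-context
 * requirement noted above; mydrv_rx_ring and its napi member are hypothetical.
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_rx_ring *ring =
 *			container_of(napi, struct mydrv_rx_ring, napi);
 *
 *		page_pool_nid_changed(ring->pool, numa_mem_id());
 *		...
 *	}
 */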

static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */