// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init;
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);
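
/* Illustrative driver-side sketch, not part of this file; names such as
 * "ring" and "netdev" are hypothetical. Registration via the
 * xdp_rxq_info_reg() wrapper is normally paired with unregistration over
 * the RX ring lifetime:
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->q_idx, napi_id);
 *	if (err)
 *		return err;
 *	...
 *	xdp_rxq_info_unreg(&ring->xdp_rxq);
 */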

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id;
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						      enum xdp_mem_type type,
						      void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
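
/* Illustrative sketch, not part of this file; "ring" and "pp" are
 * hypothetical driver names. After a successful xdp_rxq_info_reg(), a
 * page_pool backed driver attaches its allocator with:
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->pp);
 *
 * Returned frames are then recycled through the page_pool found via the
 * mem->id lookup in __xdp_return() below.
 */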

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites.  Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
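
/* Illustrative caller sketch (hypothetical, not from this file): bulk
 * returns are done under rcu_read_lock() with an on-stack xdp_frame_bulk
 * that is initialized with xdp_frame_bulk_init() and flushed once at the
 * end of the completion loop:
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (each completed xdpf)
 *		xdp_return_frame_bulk(xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */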

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already have reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool get SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
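
/* Illustrative use (hypothetical, not from this file): redirect targets
 * that deliver frames to the stack can convert and pass them up roughly
 * like:
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (skb)
 *		netif_receive_skb(skb);
 */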

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}