0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037 #ifndef __ASM_GNTTAB_H__
0038 #define __ASM_GNTTAB_H__
0039
0040 #include <asm/page.h>
0041
0042 #include <xen/interface/xen.h>
0043 #include <xen/interface/grant_table.h>
0044
0045 #include <asm/xen/hypervisor.h>
0046
0047 #include <xen/features.h>
0048 #include <xen/page.h>
0049 #include <linux/mm_types.h>
0050 #include <linux/page-flags.h>
0051 #include <linux/kernel.h>
0052
0053
0054
0055
0056
/*
 * Sentinel values: grant references and handles are unsigned, so all-ones
 * is reserved as the "invalid"/"not allocated" marker.
 */
#define INVALID_GRANT_REF ((grant_ref_t)-1)
#define INVALID_GRANT_HANDLE ((grant_handle_t)-1)

/* Initial number of grant-table frames; the runtime maximum is reported by
 * gnttab_max_grant_frames() — NOTE(review): confirm against gnttab_init(). */
#define NR_GRANT_FRAMES 4
0062
/*
 * Descriptor queued via gnttab_request_free_callback() to be notified when
 * free grant references become available again (presumably once at least
 * @count references are free — confirm against the grant-table core).
 */
struct gnttab_free_callback {
	struct gnttab_free_callback *next;	/* link in the callback queue (core-managed) */
	void (*fn)(void *);			/* notification callback */
	void *arg;				/* opaque argument passed to @fn */
	u16 count;				/* number of references being waited for */
};
0069
struct gntab_unmap_queue_data;

/* Completion callback for gnttab_unmap_refs_async(): @result is the outcome
 * of the unmap, @data the queue descriptor that finished. */
typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

/*
 * Work item describing a (possibly deferred/retried) batch unmap of granted
 * pages; driven by gnttab_unmap_refs_async() / gnttab_unmap_refs_sync().
 */
struct gntab_unmap_queue_data
{
	struct delayed_work gnttab_work;	/* deferred-work handle used for retries */
	void *data;				/* opaque cookie for the @done callback */
	gnttab_unmap_refs_done done;		/* called when the batch completes */
	struct gnttab_unmap_grant_ref *unmap_ops;	/* user-space unmap ops */
	struct gnttab_unmap_grant_ref *kunmap_ops;	/* kernel unmap ops (may be NULL) */
	struct page **pages;			/* pages being unmapped */
	unsigned int count;			/* entries in the arrays above */
	unsigned int age;			/* retry bookkeeping — internal to the core */
};
0085
/* Subsystem bring-up and suspend/resume hooks; return 0 on success. */
int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

/*
 * Grant @domid access to @frame; @readonly selects a read-only grant.
 * Returns the new grant reference, or a negative value on failure
 * (presumably when no references are free — confirm against the core).
 */
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use.  Returns nonzero if the entry could be freed, zero if it
 * is still in use by the remote domain.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref);
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
/*
 * Eventually end access through the given grant reference and, once access
 * has ended, free @page.  If the grant entry is still in use the teardown
 * is deferred rather than performed immediately — NOTE(review): confirm
 * whether @page may be NULL; the deferral logic lives in the core.
 */
void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
0116
0117
0118
0119
0120
0121
0122
/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use, deallocating the reference on success.  Returns nonzero
 * when access was ended, zero when the entry is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);
0124
0125
0126
0127
/* Reserve @count grant references into the private pool rooted at
 * *@pprivate_head; returns 0 on success, negative on failure. */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

/* Allocate @count consecutive grant references; *@first receives the first
 * reference of the run. */
int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);

/* Release a single grant reference back to the global pool. */
void gnttab_free_grant_reference(grant_ref_t ref);

/* Release a whole private pool (as filled by gnttab_alloc_grant_references). */
void gnttab_free_grant_references(grant_ref_t head);

/* Release a consecutive run of @count references starting at @head. */
void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);

/* Nonzero iff the private pool at *@pprivate_head has no references left. */
int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

/* Take one reference from the private pool; presumably returns a negative
 * value when the pool is empty — confirm against the core. */
int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

/* Put @release back into the private pool at *@private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release);

/* Queue @callback so that @fn(@arg) runs once @count free references exist;
 * cancel with gnttab_cancel_free_callback(). */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);

/* Like gnttab_grant_foreign_access(), but fills in the caller-supplied,
 * already-allocated reference @ref instead of allocating one. */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly);
0151
0152
/*
 * Grant @domid access to the single page @page through the pre-allocated
 * reference @ref; the GFN is derived from the page via xen_page_to_gfn().
 */
static inline void gnttab_page_grant_foreign_access_ref_one(
	grant_ref_t ref, domid_t domid,
	struct page *page, int readonly)
{
	gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
					readonly);
}
0160
0161 static inline void
0162 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
0163 uint32_t flags, grant_ref_t ref, domid_t domid)
0164 {
0165 if (flags & GNTMAP_contains_pte)
0166 map->host_addr = addr;
0167 else if (xen_feature(XENFEAT_auto_translated_physmap))
0168 map->host_addr = __pa(addr);
0169 else
0170 map->host_addr = addr;
0171
0172 map->flags = flags;
0173 map->ref = ref;
0174 map->dom = domid;
0175 map->status = 1;
0176 }
0177
0178 static inline void
0179 gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
0180 uint32_t flags, grant_handle_t handle)
0181 {
0182 if (flags & GNTMAP_contains_pte)
0183 unmap->host_addr = addr;
0184 else if (xen_feature(XENFEAT_auto_translated_physmap))
0185 unmap->host_addr = __pa(addr);
0186 else
0187 unmap->host_addr = addr;
0188
0189 unmap->handle = handle;
0190 unmap->dev_bus_addr = 0;
0191 }
0192
/*
 * Architecture hooks implemented per-arch: reserve virtual space for the
 * shared and status frames, (re)map them as the table grows, and unmap on
 * teardown.  All return 0 on success.
 */
int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
			   unsigned long max_nr_gframes,
			   grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
0201
/*
 * Bookkeeping for grant-table frames on auto-translated guests; set up via
 * gnttab_setup_auto_xlat_frames() and torn down by
 * gnttab_free_auto_xlat_frames().
 */
struct grant_frames {
	xen_pfn_t *pfn;		/* frame numbers backing the table */
	unsigned int count;	/* entries in @pfn */
	void *vaddr;		/* kernel mapping of those frames */
};
extern struct grant_frames xen_auto_xlat_grant_frames;

/* Maximum number of grant-table frames this guest may use. */
unsigned int gnttab_max_grant_frames(void);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

/* Virtual address a map op was applied to — NOTE(review): references a
 * 'host_virt_addr' field that none of the structs visible here declare;
 * looks like a legacy helper, confirm its intended operand type. */
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
0213
/* Allocate/free pages suitable for being granted to other domains. */
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

/*
 * Small cache of grant-suitable pages so backends can recycle them instead
 * of round-tripping through the allocator.
 */
struct gnttab_page_cache {
	spinlock_t lock;		/* protects @pages and @num_pages */
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page *pages;		/* cached pages (chained; representation is config-specific) */
#else
	struct list_head pages;		/* cached pages on a list */
#endif
	unsigned int num_pages;		/* current number of cached pages */
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
/* Pop one page from the cache into *@page; nonzero result when empty. */
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
/* Return @num pages to the cache. */
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num);
/* Free cached pages down to at most @num remaining. */
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
			      unsigned int num);
0233
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Argument bundle for grant-backed DMA buffer allocation. */
struct gnttab_dma_alloc_args {
	/* Device for which the DMA memory is (to be) allocated. */
	struct device *dev;

	/* Whether a coherent DMA mapping is requested. */
	bool coherent;

	int nr_pages;			/* number of pages to allocate */
	struct page **pages;		/* the allocated pages */
	xen_pfn_t *frames;		/* frame numbers of those pages */
	void *vaddr;			/* kernel virtual address of the buffer */
	dma_addr_t dev_bus_addr;	/* bus address usable by @dev */
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif
0251
/* Mark/unmark @pages as carrying grant-private data (page->private usage). */
int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

/*
 * Perform a batch of grant map/unmap operations onto @pages.  @kmap_ops /
 * @kunmap_ops cover an additional kernel mapping and — NOTE(review) —
 * presumably may be NULL when no kernel mapping is involved; confirm
 * against the implementation.  Return 0 on success.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count);
/* Asynchronous unmap: completion reported via item->done (may retry). */
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
/* Synchronous wrapper around the async unmap; returns its result. */
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
/*
 * Issue a batch of GNTTABOP_map_grant_ref / GNTTABOP_copy hypercall ops of
 * size @count — NOTE(review): the core presumably retries on transient
 * hypervisor errors; confirm before relying on completion semantics.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
0276
0277
/* Owner information attached to a page granted by another domain. */
struct xen_page_foreign {
	domid_t domid;		/* domain that granted the page */
	grant_ref_t gref;	/* its grant reference */
};
0282
/*
 * Return the foreign-grant info stashed in @page, or NULL if the page is
 * not foreign (PageForeign clear).  On 64-bit the small struct is packed
 * in-place inside page->private (hence returning its address); on 32-bit
 * page->private is treated as a pointer to a separately stored descriptor.
 */
static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
	if (!PageForeign(page))
		return NULL;
#if BITS_PER_LONG < 64
	return (struct xen_page_foreign *)page->private;
#else
	/* NOTE(review): compares sizeof (bytes) against BITS_PER_LONG (bits);
	 * conservative — trips long before the struct outgrows 'private'. */
	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
	return (struct xen_page_foreign *)&page->private;
#endif
}
0294
0295
0296
0297
0298
0299
0300
0301
0302
/*
 * Per-granule callback: invoked with the Xen frame number @gfn, the byte
 * @offset within that Xen page, the chunk length @len, and the caller's
 * opaque @data.
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
			       unsigned int len, void *data);

/* Split the byte range [@offset, @offset + @len) of @page into Xen-page
 * sized granules and invoke @fn on each. */
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data);
0311
0312
0313 static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
0314 unsigned len, xen_grant_fn_t fn,
0315 void *data)
0316 {
0317
0318 len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
0319 len);
0320
0321 gnttab_foreach_grant_in_range(page, offset, len, fn, data);
0322 }
0323
0324
/* Invoke @fn once per grant-sized granule across @pages, for @nr_grefs
 * granules in total — NOTE(review): confirm the iteration unit (granules
 * vs. pages) against the implementation. */
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data);
0329
0330
0331
0332
0333
0334
/*
 * Number of Xen-page-sized grants needed to cover @len bytes starting at
 * byte offset @start.
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	/* Distance from the start of the first Xen page to the end of data. */
	unsigned int span = xen_offset_in_page(start) + len;

	return XEN_PFN_UP(span);
}
0340
0341 #endif