// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

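	/* set for buffers allocated by the backend */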
	bool be_alloc;

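	/* this is for imported PRIME buffer */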
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

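	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0 as we
	 * want to map the whole buffer.
	 */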
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_pgoff = 0;

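	/*
	 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */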
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

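	/*
	 * A .fault handler would normally populate the VMA on first CPU
	 * access, but for GPU buffers the CPU may never touch the memory.
	 * Insert all pages now, so both CPU and GPU mappings are set up;
	 * as everything is mapped up front, no .fault handler is provided.
	 */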
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
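		/*
		 * The backend will allocate space for this buffer, so
		 * only allocate the array of pointers to pages here.
		 */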
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

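		/*
		 * Allocate ballooned pages which will be used to map
		 * grant references provided by the backend.
		 */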
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

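	/*
	 * Need to allocate backing pages now, so we can share those
	 * with the backend.
	 */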
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

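	/* Map the pages contiguously into kernel virtual address space. */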
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}