#include <linux/gfp.h>
#include <linux/slab.h>

#include "qxl_drv.h"
#include "qxl_object.h"

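/*
 * Allocate one driver-side chunk wrapper plus its backing buffer object
 * and queue it on the image's chunk list; qxl_image_free_objects() is
 * responsible for tearing both down again.
 */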
static int
qxl_allocate_chunk(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *image,
		   unsigned int chunk_size)
{
	struct qxl_drm_chunk *chunk;
	int ret;

	chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
	if (ret) {
		kfree(chunk);
		return ret;
	}

	list_add_tail(&chunk->head, &image->chunk_list);
	return 0;
}

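/*
 * Allocate the buffer objects backing a QXL image: one BO for the
 * qxl_image descriptor itself and one data chunk large enough to hold
 * height lines of stride bytes each.
 */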
int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image),
				    &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	/* a single data chunk sized to hold the whole bitmap */
	ret = qxl_allocate_chunk(qdev, release, image,
				 sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}
	*image_ptr = image;
	return 0;
}

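/* Drop every chunk BO on the image's list, then the image BO itself. */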
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
	struct qxl_drm_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
		qxl_bo_unref(&chunk->bo);
		kfree(chunk);
	}

	qxl_bo_unref(&dimage->bo);
	kfree(dimage);
}

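/*
 * Copy the pixel data into the chunk BO one atomic page mapping at a
 * time, then fill in the image descriptor so it points at the chunk.
 * Note that the hash parameter is currently unused: descriptor.id is
 * left at 0.
 */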
static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;

	/*
	 * qxl_image_alloc_objects() allocates a single chunk sized for the
	 * whole bitmap, so only the first entry on the list is used.
	 */
	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
	/* the chunk keeps the caller's stride, so the fast path below only
	 * needs to check stride == linesize */
	chunk_stride = stride;

	/* write the chunk header; with one chunk there are no prev/next links */
	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;

		if (stride == linesize && chunk_stride == stride) {
			/* fast path: lines are contiguous, copy page by page */
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					/* the first page also holds the chunk header */
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
			/* slow path: copy line by line, splitting each line
			 * on page boundaries */
			unsigned int page_base, page_offset, out_offset;

			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_vunmap_locked(chunk_bo);

	/* fill in the image descriptor that points at the chunk */
	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

	switch (depth) {
	case 1:
		/* 1-bit bitmaps are passed to spice in big-endian bit order */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		return -EINVAL;
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}

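/*
 * Initialize a QXL image from the (x, y, width, height) sub-rectangle
 * of @data, handing off to the helper with a zero hash.
 */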
int qxl_image_init(struct qxl_device *qdev,
		   struct qxl_release *release,
		   struct qxl_drm_image *dimage,
		   const uint8_t *data,
		   int x, int y, int width, int height,
		   int depth, int stride)
{
	/* advance to the first pixel of the (x, y) sub-rectangle */
	data += y * stride + x * (depth / 8);
	return qxl_image_init_helper(qdev, release, dimage, data,
				     width, height, depth, 0, stride);
}