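/*
 * Exynos DRM Image Post Processing (IPP) core.
 *
 * Handles registration of IPP hardware modules (rotators, scalers, format
 * converters) and exposes them to userspace through the Exynos-specific
 * ioctls for enumerating processors, querying their capabilities and limits,
 * and committing image processing tasks.
 */
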
#include <linux/uaccess.h>

#include <drm/drm_blend.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"

static int num_ipp;
static LIST_HEAD(ipp_list);

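/**
 * exynos_drm_ipp_register - Register a new picture processor hardware module
 * @dev: DRM device
 * @ipp: ipp module to init
 * @funcs: callbacks for the new ipp object
 * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
 * @formats: array of supported formats
 * @num_formats: size of the supported formats array
 * @name: name (for debugging purposes)
 *
 * Initializes an ipp module and adds it to the global list of available
 * processors.
 *
 * Returns:
 * Zero on success, error code on failure.
 */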
int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
		const struct exynos_drm_ipp_formats *formats,
		unsigned int num_formats, const char *name)
{
	WARN_ON(!ipp);
	WARN_ON(!funcs);
	WARN_ON(!formats);
	WARN_ON(!num_formats);

	spin_lock_init(&ipp->lock);
	INIT_LIST_HEAD(&ipp->todo_list);
	init_waitqueue_head(&ipp->done_wq);
	ipp->dev = dev;
	ipp->funcs = funcs;
	ipp->capabilities = caps;
	ipp->name = name;
	ipp->formats = formats;
	ipp->num_formats = num_formats;

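	/* ipp_list modification is serialized by the component framework */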
	list_add_tail(&ipp->head, &ipp_list);
	ipp->id = num_ipp++;

	DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);

	return 0;
}

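/**
 * exynos_drm_ipp_unregister - Unregister the picture processor module
 * @dev: DRM device
 * @ipp: ipp module
 */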
void exynos_drm_ipp_unregister(struct device *dev,
			       struct exynos_drm_ipp *ipp)
{
	WARN_ON(ipp->task);
	WARN_ON(!list_empty(&ipp->todo_list));
	list_del(&ipp->head);
}

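/**
 * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a list of ipp ids.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */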
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_res *resp = data;
	struct exynos_drm_ipp *ipp;
	uint32_t __user *ipp_ptr = (uint32_t __user *)
						(unsigned long)resp->ipp_id_ptr;
	unsigned int count = num_ipp, copied = 0;

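	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */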
	if (count && resp->count_ipps >= count) {
		list_for_each_entry(ipp, &ipp_list, head) {
			if (put_user(ipp->id, ipp_ptr + copied))
				return -EFAULT;
			copied++;
		}
	}
	resp->count_ipps = count;

	return 0;
}

static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
{
	struct exynos_drm_ipp *ipp;

	list_for_each_entry(ipp, &ipp_list, head)
		if (ipp->id == id)
			return ipp;
	return NULL;
}

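/**
 * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module capabilities.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */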
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
	struct exynos_drm_ipp *ipp;
	int i;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	resp->ipp_id = ipp->id;
	resp->capabilities = ipp->capabilities;

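	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */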
	if (resp->formats_count >= ipp->num_formats) {
		for (i = 0; i < ipp->num_formats; i++) {
			struct drm_exynos_ipp_format tmp = {
				.fourcc = ipp->formats[i].fourcc,
				.type = ipp->formats[i].type,
				.modifier = ipp->formats[i].modifier,
			};

			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
				return -EFAULT;
			ptr += sizeof(tmp);
		}
	}
	resp->formats_count = ipp->num_formats;

	return 0;
}

static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
				struct exynos_drm_ipp *ipp, uint32_t fourcc,
				uint64_t mod, unsigned int type)
{
	int i;

	for (i = 0; i < ipp->num_formats; i++) {
		if ((ipp->formats[i].type & type) &&
		    ipp->formats[i].fourcc == fourcc &&
		    ipp->formats[i].modifier == mod)
			return &ipp->formats[i];
	}
	return NULL;
}

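/**
 * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module limitations for the provided
 * picture format.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */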
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
	const struct exynos_drm_ipp_formats *format;
	struct exynos_drm_ipp *ipp;

	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
		return -EINVAL;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
				  resp->type);
	if (!format)
		return -EINVAL;

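	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */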
	if (format->num_limits && resp->limits_count >= format->num_limits)
		if (copy_to_user((void __user *)ptr, format->limits,
				 sizeof(*format->limits) * format->num_limits))
			return -EFAULT;
	resp->limits_count = format->num_limits;

	return 0;
}

struct drm_pending_exynos_ipp_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};

static inline struct exynos_drm_ipp_task *
			exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	task->dev = ipp->dev;
	task->ipp = ipp;

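	/* some defaults */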
	task->src.rect.w = task->dst.rect.w = UINT_MAX;
	task->src.rect.h = task->dst.rect.h = UINT_MAX;
	task->transform.rotation = DRM_MODE_ROTATE_0;

	DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);

	return task;
}

static const struct exynos_drm_param_map {
	unsigned int id;
	unsigned int size;
	unsigned int offset;
} exynos_drm_ipp_params_maps[] = {
	{
		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, src.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, dst.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, src.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, dst.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_TRANSFORM,
		sizeof(struct drm_exynos_ipp_task_transform),
		offsetof(struct exynos_drm_ipp_task, transform),
	}, {
		DRM_EXYNOS_IPP_TASK_ALPHA,
		sizeof(struct drm_exynos_ipp_task_alpha),
		offsetof(struct exynos_drm_ipp_task, alpha),
	},
};

static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
				   struct drm_exynos_ioctl_ipp_commit *arg)
{
	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
	unsigned int size = arg->params_size;
	uint32_t id;
	int i;

	while (size) {
		if (get_user(id, (uint32_t __user *)params))
			return -EFAULT;

		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
			if (map[i].id == id)
				break;
		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
		    map[i].size > size)
			return -EINVAL;

		if (copy_from_user((void *)task + map[i].offset, params,
				   map[i].size))
			return -EFAULT;

		params += map[i].size;
		size -= map[i].size;
	}

	DRM_DEV_DEBUG_DRIVER(task->dev,
			     "Got task %pK configuration from userspace\n",
			     task);
	return 0;
}

static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
					    struct drm_file *filp)
{
	int ret = 0;
	int i;

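	/* get GEM buffers and check their size */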
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int height = (i == 0) ? buf->buf.height :
			     DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
		unsigned long size = height * buf->buf.pitch[i];
		struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
							    buf->buf.gem_id[i]);
		if (!gem) {
			ret = -ENOENT;
			goto gem_free;
		}
		buf->exynos_gem[i] = gem;

		if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
			i++;
			ret = -EINVAL;
			goto gem_free;
		}
		buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
				   buf->buf.offset[i];
	}

	return 0;
gem_free:
	while (i--) {
		exynos_drm_gem_put(buf->exynos_gem[i]);
		buf->exynos_gem[i] = NULL;
	}
	return ret;
}

static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
	int i;

	if (!buf->exynos_gem[0])
		return;
	for (i = 0; i < buf->format->num_planes; i++)
		exynos_drm_gem_put(buf->exynos_gem[i]);
}

static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
				     struct exynos_drm_ipp_task *task)
{
	DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);

	exynos_drm_ipp_task_release_buf(&task->src);
	exynos_drm_ipp_task_release_buf(&task->dst);
	if (task->event)
		drm_event_cancel_free(ipp->drm_dev, &task->event->base);
	kfree(task);
}

struct drm_ipp_limit {
	struct drm_exynos_ipp_limit_val h;
	struct drm_exynos_ipp_limit_val v;
};

enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};

static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};

static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
	if (!*ptr)
		*ptr = val;
}

static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
			     unsigned int num_limits, enum drm_ipp_size_id id,
			     struct drm_ipp_limit *res)
{
	const struct drm_exynos_ipp_limit *l = limits;
	int i = 0;

	memset(res, 0, sizeof(*res));
	for (i = 0; limit_id_fallback[id][i]; i++)
		for (l = limits; l - limits < num_limits; l++) {
			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
						     limit_id_fallback[id][i]))
				continue;
			__limit_set_val(&res->h.min, l->h.min);
			__limit_set_val(&res->h.max, l->h.max);
			__limit_set_val(&res->h.align, l->h.align);
			__limit_set_val(&res->v.min, l->v.min);
			__limit_set_val(&res->v.max, l->v.max);
			__limit_set_val(&res->v.align, l->v.align);
		}
}

static inline bool __align_check(unsigned int val, unsigned int align)
{
	if (align && (val & (align - 1))) {
		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
				 val, align);
		return false;
	}
	return true;
}

static inline bool __size_limit_check(unsigned int val,
				      struct drm_exynos_ipp_limit_val *l)
{
	if ((l->min && val < l->min) || (l->max && val > l->max)) {
		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
				 val, l->min, l->max);
		return false;
	}
	return __align_check(val, l->align);
}

static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
	bool rotate, bool swap)
{
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];

	if (!limits)
		return 0;

	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(real_width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

	if (swap) {
		lv = &l.h;
		lh = &l.v;
	}
	__get_size_limit(limits, num_limits, id, &l);
	if (!__size_limit_check(buf->rect.w, lh) ||
	    !__align_check(buf->rect.x, lh->align) ||
	    !__size_limit_check(buf->rect.h, lv) ||
	    !__align_check(buf->rect.y, lv->align))
		return -EINVAL;

	return 0;
}

static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
				       unsigned int min, unsigned int max)
{
	if ((max && (dst << 16) > src * max) ||
	    (min && (dst << 16) < src * min)) {
		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
				 src, dst,
				 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
				 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
		return false;
	}
	return true;
}

static int exynos_drm_ipp_check_scale_limits(
				struct drm_exynos_ipp_task_rect *src,
				struct drm_exynos_ipp_task_rect *dst,
				const struct drm_exynos_ipp_limit *limits,
				unsigned int num_limits, bool swap)
{
	const struct drm_exynos_ipp_limit_val *lh, *lv;
	int dw, dh;

	for (; num_limits; limits++, num_limits--)
		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
			break;
	if (!num_limits)
		return 0;

	lh = (!swap) ? &limits->h : &limits->v;
	lv = (!swap) ? &limits->v : &limits->h;
	dw = (!swap) ? dst->w : dst->h;
	dh = (!swap) ? dst->h : dst->w;

	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
		return -EINVAL;

	return 0;
}

static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
				       struct exynos_drm_ipp_buffer *buf,
				       struct exynos_drm_ipp_buffer *src,
				       struct exynos_drm_ipp_buffer *dst,
				       bool rotate, bool swap)
{
	const struct exynos_drm_ipp_formats *fmt;
	int ret, i;

	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!fmt) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: %s format not supported\n",
				     task, buf == src ? "src" : "dst");
		return -EINVAL;
	}

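	/* basic checks */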
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;

	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int width = (i == 0) ? buf->buf.width :
			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

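	/* pitch for additional planes must match */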
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

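	/* check driver limits */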
	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
					       fmt->num_limits,
					       rotate,
					       buf == dst ? swap : false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						fmt->limits,
						fmt->num_limits, swap);
	return ret;
}

static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);

	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: defined area is outside provided buffers\n",
				     task);
		return -EINVAL;
	}

	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
				     task);
		return -EINVAL;
	}

	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
	if (ret)
		return ret;

	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
			     task);

	return ret;
}

static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
					     struct drm_file *filp)
{
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	int ret = 0;

	DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
			     task);

	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: src buffer setup failed\n",
				     task);
		return ret;
	}
	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(task->dev,
				     "Task %pK: dst buffer setup failed\n",
				     task);
		return ret;
	}

	DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
			     task);

	return ret;
}

static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
		struct drm_file *file_priv, uint64_t user_data)
{
	struct drm_pending_exynos_ipp_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
				     &e->event.base);
	if (ret)
		goto free;

	task->event = e;
	return 0;
free:
	kfree(e);
	return ret;
}

static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	task->event->event.tv_sec = now.tv_sec;
	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);

	drm_send_event(task->ipp->drm_dev, &task->event->base);
}

static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
{
	int ret = task->ret;

	if (ret == 0 && task->event) {
		exynos_drm_ipp_event_send(task);
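		/* ensure the event will not be canceled on task free */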
		task->event = NULL;
	}

	exynos_drm_ipp_task_free(task->ipp, task);
	return ret;
}

static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task = container_of(work,
				struct exynos_drm_ipp_task, cleanup_work);

	exynos_drm_ipp_task_cleanup(task);
}

static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);

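/**
 * exynos_drm_ipp_task_done - finish given task and set return code
 * @task: ipp task to finish
 * @ret: error code or 0 if the operation has been performed successfully
 */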
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	unsigned long flags;

	DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
			     ipp->id, task, ret);

	spin_lock_irqsave(&ipp->lock, flags);
	if (ipp->task == task)
		ipp->task = NULL;
	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
	task->ret = ret;
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
	wake_up(&ipp->done_wq);

	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
		schedule_work(&task->cleanup_work);
	}
}

static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
			     ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);

	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;

	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEV_DEBUG_DRIVER(ipp->dev,
			     "ipp: %d, selected task %pK to run\n", ipp->id,
			     task);

	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}

static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}

static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
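		/* already completed task */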
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
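		/* task has not been scheduled for execution yet */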
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
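		/*
		 * Currently processed task: ask the driver to abort it and
		 * defer cleanup to the asynchronous completion path, as
		 * exynos_drm_ipp_task_done() will schedule cleanup_work for
		 * tasks marked DRM_EXYNOS_IPP_TASK_ASYNC.
		 */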
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}

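/**
 * exynos_drm_ipp_commit_ioctl - perform image processing operation
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct an ipp task from the set of properties provided by the user
 * and try to schedule it on the picture processor hardware.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */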
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_commit *arg = data;
	struct exynos_drm_ipp *ipp;
	struct exynos_drm_ipp_task *task;
	int ret = 0;

	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
		return -EINVAL;

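	/* can't test and expect an event at the same time */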
	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
	    (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
		return -EINVAL;

	ipp = __ipp_get(arg->ipp_id);
	if (!ipp)
		return -ENOENT;

	task = exynos_drm_ipp_task_alloc(ipp);
	if (!task)
		return -ENOMEM;

	ret = exynos_drm_ipp_task_set(task, arg);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_check(task);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
		goto free;

	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
		ret = exynos_drm_ipp_event_create(task, file_priv,
						  arg->user_data);
		if (ret)
			goto free;
	}

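	/*
	 * Queue the task for processing on the hardware. The task object
	 * is freed after exynos_drm_ipp_task_done().
	 */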
	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
		DRM_DEV_DEBUG_DRIVER(ipp->dev,
				     "ipp: %d, nonblocking processing task %pK\n",
				     ipp->id, task);

		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		exynos_drm_ipp_schedule_task(task->ipp, task);
		ret = 0;
	} else {
		DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
				     ipp->id, task);
		exynos_drm_ipp_schedule_task(ipp, task);
		ret = wait_event_interruptible(ipp->done_wq,
					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
		if (ret)
			exynos_drm_ipp_task_abort(ipp, task);
		else
			ret = exynos_drm_ipp_task_cleanup(task);
	}
	return ret;
free:
	exynos_drm_ipp_task_free(ipp, task);

	return ret;
}