0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/slab.h>
0025
0026 #include <drm/drm_flip_work.h>
0027 #include <drm/drm_print.h>
0028 #include <drm/drm_util.h>
0029
0030
0031
0032
0033
0034
0035
0036
0037 struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
0038 {
0039 struct drm_flip_task *task;
0040
0041 task = kzalloc(sizeof(*task), flags);
0042 if (task)
0043 task->data = data;
0044
0045 return task;
0046 }
0047 EXPORT_SYMBOL(drm_flip_work_allocate_task);
0048
0049
0050
0051
0052
0053
0054
0055
0056
/**
 * drm_flip_work_queue_task - queue a specific task
 * @work: the flip-work
 * @task: the task to handle
 *
 * Adds @task to the work's pending (queued) list; it will later be run
 * via the work's func callback once drm_flip_work_commit() is called.
 * Ownership of @task passes to the flip-work, which frees it after
 * execution.
 */
void drm_flip_work_queue_task(struct drm_flip_work *work,
		struct drm_flip_task *task)
{
	unsigned long flags;

	/* irqsave variant: the lock guards lists that callers may touch
	 * from interrupt context (NOTE(review): inferred from the irqsave
	 * usage throughout this file — confirm against callers). */
	spin_lock_irqsave(&work->lock, flags);
	list_add_tail(&task->node, &work->queued);
	spin_unlock_irqrestore(&work->lock, flags);
}
EXPORT_SYMBOL(drm_flip_work_queue_task);
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076 void drm_flip_work_queue(struct drm_flip_work *work, void *val)
0077 {
0078 struct drm_flip_task *task;
0079
0080 task = drm_flip_work_allocate_task(val,
0081 drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
0082 if (task) {
0083 drm_flip_work_queue_task(work, task);
0084 } else {
0085 DRM_ERROR("%s could not allocate task!\n", work->name);
0086 work->func(work, val);
0087 }
0088 }
0089 EXPORT_SYMBOL(drm_flip_work_queue);
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
/**
 * drm_flip_work_commit - commit queued work
 * @work: the flip-work
 * @wq: the work-queue to run the queued work on
 *
 * Moves everything on the queued list onto the committed list and
 * schedules the worker on @wq to process it. Work queued after this
 * call stays pending until the next commit.
 */
void drm_flip_work_commit(struct drm_flip_work *work,
		struct workqueue_struct *wq)
{
	unsigned long flags;

	/* Atomically transfer queued -> commited so tasks queued
	 * concurrently land in the next commit, not this one. */
	spin_lock_irqsave(&work->lock, flags);
	list_splice_tail(&work->queued, &work->commited);
	INIT_LIST_HEAD(&work->queued);
	spin_unlock_irqrestore(&work->lock, flags);
	queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
0113
0114 static void flip_worker(struct work_struct *w)
0115 {
0116 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
0117 struct list_head tasks;
0118 unsigned long flags;
0119
0120 while (1) {
0121 struct drm_flip_task *task, *tmp;
0122
0123 INIT_LIST_HEAD(&tasks);
0124 spin_lock_irqsave(&work->lock, flags);
0125 list_splice_tail(&work->commited, &tasks);
0126 INIT_LIST_HEAD(&work->commited);
0127 spin_unlock_irqrestore(&work->lock, flags);
0128
0129 if (list_empty(&tasks))
0130 break;
0131
0132 list_for_each_entry_safe(task, tmp, &tasks, node) {
0133 work->func(work, task->data);
0134 kfree(task);
0135 }
0136 }
0137 }
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 void drm_flip_work_init(struct drm_flip_work *work,
0148 const char *name, drm_flip_func_t func)
0149 {
0150 work->name = name;
0151 INIT_LIST_HEAD(&work->queued);
0152 INIT_LIST_HEAD(&work->commited);
0153 spin_lock_init(&work->lock);
0154 work->func = func;
0155
0156 INIT_WORK(&work->worker, flip_worker);
0157 }
0158 EXPORT_SYMBOL(drm_flip_work_init);
0159
0160
0161
0162
0163
0164
0165
/**
 * drm_flip_work_cleanup - cleans up flip-work
 * @work: the flip-work to cleanup
 *
 * Destroy resources allocated for the flip-work. Warns if any tasks
 * are still pending (queued or committed) — the caller is expected to
 * have drained the work before cleanup.
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
	WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);