// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults (recoverable I/O page faults).
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "iommu-sva-lib.h"

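/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */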
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

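/**
 * struct iopf_device_param - IO Page Fault data attached to a device
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 */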
struct iopf_device_param {
	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;
	struct list_head partial;
};

struct iopf_fault {
	struct iommu_fault fault;
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	struct work_struct work;
	struct device *dev;
};

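/*
 * Send the response for a whole page request group. @iopf is the last fault
 * of the group and carries the PASID and group ID for the response.
 */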
static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
			       enum iommu_page_response_code status)
{
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.pasid		= iopf->fault.prm.pasid,
		.grpid		= iopf->fault.prm.grpid,
		.code		= status,
	};

	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	return iommu_page_response(dev, &resp);
}

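/*
 * Handle a single page request: find the mm bound to the PASID, locate the
 * VMA covering the faulting address, check permissions and call
 * handle_mm_fault(). Returns the response code to send back to the device.
 */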
static enum iommu_page_response_code
iopf_handle_single(struct iopf_fault *iopf)
{
	vm_fault_t ret;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &iopf->fault.prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	mm = iommu_sva_find(prm->pasid);
	if (IS_ERR_OR_NULL(mm))
		return status;

	mmap_read_lock(mm);

	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

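/*
 * Work function: handle every fault of a page request group, then send a
 * single response for the whole group.
 */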
static void iopf_handle_group(struct work_struct *work)
{
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		/*
		 * Errors are sticky: don't handle subsequent faults in the
		 * group if there is an error. Still free the non-last faults.
		 */
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = iopf_handle_single(iopf);

		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	kfree(group);
}

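/**
 * iommu_queue_iopf - IO Page Fault handler
 * @fault: fault event
 * @cookie: struct device, passed to iommu_register_device_fault_handler()
 *
 * Add a fault to the device workqueue, to be handled by mm.
 *
 * Partial page requests (without the last-page flag) are stashed on the
 * device's partial list until the last request of the group arrives, at
 * which point the whole group is moved onto a work item and queued.
 *
 * Return: 0 on success and <0 on error.
 */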
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	int ret;
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	struct device *dev = cookie;
	struct dev_iommu *param = dev->iommu;

	lockdep_assert_held(&param->lock);

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		/* Not a recoverable page fault */
		return -EOPNOTSUPP;

	/*
	 * As long as we're holding param->lock, the fault parameters can't be
	 * unlinked from the device and the queue can't disappear under us.
	 */
	iopf_param = param->iopf_param;
	if (!iopf_param)
		return -ENODEV;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
		if (!iopf)
			return -ENOMEM;

		iopf->fault = *fault;

		/* Non-last request of a group. Postpone until the last one */
		list_add(&iopf->list, &iopf_param->partial);

		return 0;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		/*
		 * We can't handle this group, but clean up its partial faults
		 * before leaving so that they don't stay stuck on the list.
		 */
		ret = -ENOMEM;
		goto cleanup_partial;
	}

	group->dev = dev;
	group->last_fault.fault = *fault;
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	INIT_WORK(&group->work, iopf_handle_group);

	/* See if we have partial faults for this group */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}

	queue_work(iopf_param->queue->wq, &group->work);
	return 0;

cleanup_partial:
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_queue_iopf);

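/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled and won't hit the address
 * space of the next process that uses the PASID. The driver must make sure
 * that no new fault is added to the queue, in particular by flushing its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */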
int iopf_queue_flush_dev(struct device *dev)
{
	int ret = 0;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param)
		flush_workqueue(iopf_param->queue->wq);
	else
		ret = -ENODEV;
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);

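/**
 * iopf_queue_discard_partial - Remove all pending partial faults
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have
 * been lost and the IOMMU driver calls this to discard all partial faults.
 * The driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */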
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
					 list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	mutex_unlock(&queue->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);

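/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */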
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EBUSY;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
	if (!iopf_param)
		return -ENOMEM;

	INIT_LIST_HEAD(&iopf_param->partial);
	iopf_param->queue = queue;
	iopf_param->dev = dev;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (!param->iopf_param) {
		list_add(&iopf_param->queue_list, &queue->devices);
		param->iopf_param = iopf_param;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	if (ret)
		kfree(iopf_param);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);

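/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Caller makes sure that no more faults are reported for this device.
 *
 * Return: 0 on success and <0 on error.
 */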
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EINVAL;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param || !queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param && iopf_param->queue == queue) {
		list_del(&iopf_param->queue_list);
		param->iopf_param = NULL;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
	if (ret)
		return ret;

	/* Just in case some faults are still stuck */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
		kfree(iopf);

	kfree(iopf_param);

	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);

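/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */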
struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/*
	 * The WQ is unordered because the low-level handler enqueues faults by
	 * group. PRI requests within a group have to be ordered, but once
	 * that's dealt with, the high-level function can handle groups out of
	 * order.
	 */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);

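/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The caller must make sure that no
 * producer is adding faults to the queue anymore.
 */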
void iopf_queue_free(struct iopf_queue *queue)
{
	struct iopf_device_param *iopf_param, *next;

	if (!queue)
		return;

	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
		iopf_queue_remove_device(queue, iopf_param->dev);

	destroy_workqueue(queue->wq);
	kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);