// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults
 *
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "iommu-sva-lib.h"

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
    struct workqueue_struct     *wq;
    struct list_head        devices;
    struct mutex            lock;
};

/**
 * struct iopf_device_param - IO Page Fault data attached to a device
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: entry in the queue->devices list
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 */
struct iopf_device_param {
    struct device           *dev;
    struct iopf_queue       *queue;
    struct list_head        queue_list;
    struct list_head        partial;
};

struct iopf_fault {
    struct iommu_fault      fault;
    struct list_head        list;
};

struct iopf_group {
    struct iopf_fault       last_fault;
    struct list_head        faults;
    struct work_struct      work;
    struct device           *dev;
};

static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
                   enum iommu_page_response_code status)
{
    struct iommu_page_response resp = {
        .version        = IOMMU_PAGE_RESP_VERSION_1,
        .pasid          = iopf->fault.prm.pasid,
        .grpid          = iopf->fault.prm.grpid,
        .code           = status,
    };

    if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
        (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
        resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

    return iommu_page_response(dev, &resp);
}

static enum iommu_page_response_code
iopf_handle_single(struct iopf_fault *iopf)
{
    vm_fault_t ret;
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    unsigned int access_flags = 0;
    unsigned int fault_flags = FAULT_FLAG_REMOTE;
    struct iommu_fault_page_request *prm = &iopf->fault.prm;
    enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

    if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
        return status;

    mm = iommu_sva_find(prm->pasid);
    if (IS_ERR_OR_NULL(mm))
        return status;

    mmap_read_lock(mm);

    vma = find_extend_vma(mm, prm->addr);
    if (!vma)
        /* Unmapped area */
        goto out_put_mm;

    if (prm->perm & IOMMU_FAULT_PERM_READ)
        access_flags |= VM_READ;

    if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
        access_flags |= VM_WRITE;
        fault_flags |= FAULT_FLAG_WRITE;
    }

    if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
        access_flags |= VM_EXEC;
        fault_flags |= FAULT_FLAG_INSTRUCTION;
    }

    if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
        fault_flags |= FAULT_FLAG_USER;

    if (access_flags & ~vma->vm_flags)
        /* Access fault */
        goto out_put_mm;

    ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
    status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
        IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
    mmap_read_unlock(mm);
    mmput(mm);

    return status;
}

static void iopf_handle_group(struct work_struct *work)
{
    struct iopf_group *group;
    struct iopf_fault *iopf, *next;
    enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

    group = container_of(work, struct iopf_group, work);

    list_for_each_entry_safe(iopf, next, &group->faults, list) {
        /*
         * For the moment, errors are sticky: don't handle subsequent
         * faults in the group if there is an error.
         */
        if (status == IOMMU_PAGE_RESP_SUCCESS)
            status = iopf_handle_single(iopf);

        if (!(iopf->fault.prm.flags &
              IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
            kfree(iopf);
    }

    iopf_complete_group(group->dev, &group->last_fault, status);
    kfree(group);
}

/**
 * iommu_queue_iopf - IO Page Fault handler
 * @fault: fault event
 * @cookie: struct device, passed to iommu_register_device_fault_handler.
 *
 * Add a fault to the device workqueue, to be handled by mm.
 *
 * This module doesn't handle PCI PASID Stop Markers; IOMMU drivers must discard
 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
 * expect a response. It may be generated when disabling a PASID (issuing a
 * PASID stop request) by some PCI devices.
 *
 * The PASID stop request is issued by the device driver before unbind(). Once
 * it completes, no page request is generated for this PASID anymore and
 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
 * for all outstanding page requests to come back with a response before
 * completing the PASID stop request. Others do not wait for page responses, and
 * instead issue this Stop Marker that tells us when the PASID can be
 * reallocated.
 *
 * It is safe to discard the Stop Marker because it is an optimization.
 * a. Page requests, which are posted requests, have been flushed to the IOMMU
 *    when the stop request completes.
 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
 *    PASID.
 *
 * So even though the Stop Marker might be issued by the device *after* the stop
 * request completes, outstanding faults will have been dealt with by the time
 * the PASID is freed.
 *
 * Return: 0 on success and <0 on error.
 */
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
    int ret;
    struct iopf_group *group;
    struct iopf_fault *iopf, *next;
    struct iopf_device_param *iopf_param;

    struct device *dev = cookie;
    struct dev_iommu *param = dev->iommu;

    lockdep_assert_held(&param->lock);

    if (fault->type != IOMMU_FAULT_PAGE_REQ)
        /* Not a recoverable page fault */
        return -EOPNOTSUPP;

    /*
     * As long as we're holding param->lock, the queue can't be unlinked
     * from the device and therefore cannot disappear.
     */
    iopf_param = param->iopf_param;
    if (!iopf_param)
        return -ENODEV;

    if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
        iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
        if (!iopf)
            return -ENOMEM;

        iopf->fault = *fault;

        /* Non-last request of a group. Postpone until the last one */
        list_add(&iopf->list, &iopf_param->partial);

        return 0;
    }

    group = kzalloc(sizeof(*group), GFP_KERNEL);
    if (!group) {
        /*
         * The caller will send a response to the hardware. But we do
         * need to clean up before leaving, otherwise partial faults
         * will be stuck.
         */
        ret = -ENOMEM;
        goto cleanup_partial;
    }

    group->dev = dev;
    group->last_fault.fault = *fault;
    INIT_LIST_HEAD(&group->faults);
    list_add(&group->last_fault.list, &group->faults);
    INIT_WORK(&group->work, iopf_handle_group);

    /* See if we have partial faults for this group */
    list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
        if (iopf->fault.prm.grpid == fault->prm.grpid)
            /* Insert *before* the last fault */
            list_move(&iopf->list, &group->faults);
    }

    queue_work(iopf_param->queue->wq, &group->work);
    return 0;

cleanup_partial:
    list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
        if (iopf->fault.prm.grpid == fault->prm.grpid) {
            list_del(&iopf->list);
            kfree(iopf);
        }
    }
    return ret;
}
EXPORT_SYMBOL_GPL(iommu_queue_iopf);
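
/*
 * Example (illustrative sketch, not part of this file): a low-level IOMMU
 * driver typically registers iommu_queue_iopf() as the fault handler for a
 * device, then feeds page requests to it through iommu_report_device_fault().
 * The evt initialization below is schematic; the prm fields would come from
 * the hardware page request.
 *
 *    iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
 *
 *    // later, in the driver's PRI/event-queue handler:
 *    struct iommu_fault_event evt = {
 *        .fault.type = IOMMU_FAULT_PAGE_REQ,
 *        // .fault.prm.{pasid,grpid,addr,perm,flags} filled from hardware
 *    };
 *    iommu_report_device_fault(dev, &evt);
 */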

/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled, and won't hit the address
 * space of the next process that uses this PASID. The driver must make sure
 * that no new fault is added to the queue. In particular it must flush its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_flush_dev(struct device *dev)
{
    int ret = 0;
    struct iopf_device_param *iopf_param;
    struct dev_iommu *param = dev->iommu;

    if (!param)
        return -ENODEV;

    mutex_lock(&param->lock);
    iopf_param = param->iopf_param;
    if (iopf_param)
        flush_workqueue(iopf_param->queue->wq);
    else
        ret = -ENODEV;
    mutex_unlock(&param->lock);

    return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
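
/*
 * Example (sketch): a plausible unbind()/PASID-release path. The driver first
 * quiesces its low-level queue for the PASID (my_driver_stop_pri() is a
 * hypothetical helper), so no new faults arrive, then drains the IOPF
 * workqueue before the PASID can be reused.
 *
 *    my_driver_stop_pri(dev, pasid);    // no new faults after this point
 *    iopf_queue_flush_dev(dev);         // handle everything already queued
 */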

/**
 * iopf_queue_discard_partial - Remove all pending partial faults
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have been
 * lost and the IOMMU driver calls this to discard all partial faults. The
 * driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
    struct iopf_fault *iopf, *next;
    struct iopf_device_param *iopf_param;

    if (!queue)
        return -EINVAL;

    mutex_lock(&queue->lock);
    list_for_each_entry(iopf_param, &queue->devices, queue_list) {
        list_for_each_entry_safe(iopf, next, &iopf_param->partial,
                     list) {
            list_del(&iopf->list);
            kfree(iopf);
        }
    }
    mutex_unlock(&queue->lock);
    return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
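
/*
 * Example (sketch): a driver's overflow handler might drop partial faults
 * before restarting its hardware queue, since the matching last-page requests
 * may have been lost. The my_smmu_*() names are hypothetical.
 *
 *    if (my_smmu_priq_overflowed(smmu)) {
 *        iopf_queue_discard_partial(smmu->priq.iopf);
 *        my_smmu_priq_restart(smmu);
 *    }
 */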

/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
    int ret = -EBUSY;
    struct iopf_device_param *iopf_param;
    struct dev_iommu *param = dev->iommu;

    if (!param)
        return -ENODEV;

    iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
    if (!iopf_param)
        return -ENOMEM;

    INIT_LIST_HEAD(&iopf_param->partial);
    iopf_param->queue = queue;
    iopf_param->dev = dev;

    mutex_lock(&queue->lock);
    mutex_lock(&param->lock);
    if (!param->iopf_param) {
        list_add(&iopf_param->queue_list, &queue->devices);
        param->iopf_param = iopf_param;
        ret = 0;
    }
    mutex_unlock(&param->lock);
    mutex_unlock(&queue->lock);

    if (ret)
        kfree(iopf_param);

    return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
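
/*
 * Example (sketch): an enable path pairing iopf_queue_add_device() with
 * fault-handler registration and unwinding on failure, in the spirit of how
 * existing callers use this API. my_enable_iopf() is a hypothetical helper.
 *
 *    static int my_enable_iopf(struct iopf_queue *q, struct device *dev)
 *    {
 *        int ret;
 *
 *        ret = iopf_queue_add_device(q, dev);
 *        if (ret)
 *            return ret;
 *
 *        ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf,
 *                                                  dev);
 *        if (ret)
 *            iopf_queue_remove_device(q, dev);
 *        return ret;
 *    }
 */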

/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Caller makes sure that no more faults are reported for this device.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
    int ret = -EINVAL;
    struct iopf_fault *iopf, *next;
    struct iopf_device_param *iopf_param;
    struct dev_iommu *param = dev->iommu;

    if (!param || !queue)
        return -EINVAL;

    mutex_lock(&queue->lock);
    mutex_lock(&param->lock);
    iopf_param = param->iopf_param;
    if (iopf_param && iopf_param->queue == queue) {
        list_del(&iopf_param->queue_list);
        param->iopf_param = NULL;
        ret = 0;
    }
    mutex_unlock(&param->lock);
    mutex_unlock(&queue->lock);
    if (ret)
        return ret;

    /* Just in case some faults are still stuck */
    list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
        kfree(iopf);

    kfree(iopf_param);

    return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);

/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */
struct iopf_queue *iopf_queue_alloc(const char *name)
{
    struct iopf_queue *queue;

    queue = kzalloc(sizeof(*queue), GFP_KERNEL);
    if (!queue)
        return NULL;

    /*
     * The WQ is unordered because the low-level handler enqueues faults by
     * group. PRI requests within a group have to be ordered, but once
     * that's dealt with, the high-level function can handle groups out of
     * order.
     */
    queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
    if (!queue->wq) {
        kfree(queue);
        return NULL;
    }

    INIT_LIST_HEAD(&queue->devices);
    mutex_init(&queue->lock);

    return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);
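
/*
 * Example (sketch): one queue per IOMMU instance, allocated at probe time.
 * The name only needs to be unique enough to identify the workqueue;
 * smmu->iopf_queue is a hypothetical field.
 *
 *    smmu->iopf_queue = iopf_queue_alloc(dev_name(smmu->dev));
 *    if (!smmu->iopf_queue)
 *        return -ENOMEM;
 */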

/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
 * adding/removing devices on this queue anymore.
 */
void iopf_queue_free(struct iopf_queue *queue)
{
    struct iopf_device_param *iopf_param, *next;

    if (!queue)
        return;

    list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
        iopf_queue_remove_device(queue, iopf_param->dev);

    destroy_workqueue(queue->wq);
    kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);
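
/*
 * Example (sketch): a matching teardown order on device/driver removal,
 * assuming faults have already been quiesced. Per-device detach comes first;
 * as the code above shows, iopf_queue_free() also detaches any devices still
 * on the queue before destroying the workqueue.
 *
 *    iommu_unregister_device_fault_handler(dev);
 *    iopf_queue_remove_device(smmu->iopf_queue, dev);
 *    ...
 *    iopf_queue_free(smmu->iopf_queue);    // once, at instance teardown
 */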