0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
0003 #include <linux/init.h>
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <linux/pci.h>
0007 #include <linux/io-64-nonatomic-lo-hi.h>
0008 #include <linux/dmaengine.h>
0009 #include <linux/irq.h>
0010 #include <linux/msi.h>
0011 #include <uapi/linux/idxd.h>
0012 #include "../dmaengine.h"
0013 #include "idxd.h"
0014 #include "registers.h"
0015 
0016 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
0017               u32 *status);
0018 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
0019 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
0020 
0021 /* Interrupt control bits */
0022 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
0023 {
0024     union genctrl_reg genctrl;
0025 
0026     genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
0027     genctrl.softerr_int_en = 1;
0028     genctrl.halt_int_en = 1;
0029     iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
0030 }
0031 
0032 void idxd_mask_error_interrupts(struct idxd_device *idxd)
0033 {
0034     union genctrl_reg genctrl;
0035 
0036     genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
0037     genctrl.softerr_int_en = 0;
0038     genctrl.halt_int_en = 0;
0039     iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
0040 }
0041 
0042 static void free_hw_descs(struct idxd_wq *wq)
0043 {
0044     int i;
0045 
0046     for (i = 0; i < wq->num_descs; i++)
0047         kfree(wq->hw_descs[i]);
0048 
0049     kfree(wq->hw_descs);
0050 }
0051 
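/*
 * Allocate the WQ's array of hardware descriptor pointers and the
 * descriptors themselves, NUMA-local to the device.
 */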
0052 static int alloc_hw_descs(struct idxd_wq *wq, int num)
0053 {
0054     struct device *dev = &wq->idxd->pdev->dev;
0055     int i;
0056     int node = dev_to_node(dev);
0057 
0058     wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
0059                     GFP_KERNEL, node);
0060     if (!wq->hw_descs)
0061         return -ENOMEM;
0062 
0063     for (i = 0; i < num; i++) {
0064         wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
0065                            GFP_KERNEL, node);
0066         if (!wq->hw_descs[i]) {
0067             free_hw_descs(wq);
0068             return -ENOMEM;
0069         }
0070     }
0071 
0072     return 0;
0073 }
0074 
0075 static void free_descs(struct idxd_wq *wq)
0076 {
0077     int i;
0078 
0079     for (i = 0; i < wq->num_descs; i++)
0080         kfree(wq->descs[i]);
0081 
0082     kfree(wq->descs);
0083 }
0084 
0085 static int alloc_descs(struct idxd_wq *wq, int num)
0086 {
0087     struct device *dev = &wq->idxd->pdev->dev;
0088     int i;
0089     int node = dev_to_node(dev);
0090 
0091     wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
0092                  GFP_KERNEL, node);
0093     if (!wq->descs)
0094         return -ENOMEM;
0095 
0096     for (i = 0; i < num; i++) {
0097         wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
0098                         GFP_KERNEL, node);
0099         if (!wq->descs[i]) {
0100             free_descs(wq);
0101             return -ENOMEM;
0102         }
0103     }
0104 
0105     return 0;
0106 }
0107 
0108 /* WQ control bits */
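/*
 * Allocate all resources a kernel WQ needs: hardware descriptors,
 * DMA-coherent completion records, software descriptors, and the sbitmap
 * that tracks free descriptor slots.
 */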
0109 int idxd_wq_alloc_resources(struct idxd_wq *wq)
0110 {
0111     struct idxd_device *idxd = wq->idxd;
0112     struct device *dev = &idxd->pdev->dev;
0113     int rc, num_descs, i;
0114 
0115     if (wq->type != IDXD_WQT_KERNEL)
0116         return 0;
0117 
0118     num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
0119     wq->num_descs = num_descs;
0120 
0121     rc = alloc_hw_descs(wq, num_descs);
0122     if (rc < 0)
0123         return rc;
0124 
0125     wq->compls_size = num_descs * idxd->data->compl_size;
0126     wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
0127     if (!wq->compls) {
0128         rc = -ENOMEM;
0129         goto fail_alloc_compls;
0130     }
0131 
0132     rc = alloc_descs(wq, num_descs);
0133     if (rc < 0)
0134         goto fail_alloc_descs;
0135 
0136     rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
0137                      dev_to_node(dev));
0138     if (rc < 0)
0139         goto fail_sbitmap_init;
0140 
0141     for (i = 0; i < num_descs; i++) {
0142         struct idxd_desc *desc = wq->descs[i];
0143 
0144         desc->hw = wq->hw_descs[i];
0145         if (idxd->data->type == IDXD_TYPE_DSA)
0146             desc->completion = &wq->compls[i];
0147         else if (idxd->data->type == IDXD_TYPE_IAX)
0148             desc->iax_completion = &wq->iax_compls[i];
0149         desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
0150         desc->id = i;
0151         desc->wq = wq;
0152         desc->cpu = -1;
0153     }
0154 
0155     return 0;
0156 
0157  fail_sbitmap_init:
0158     free_descs(wq);
0159  fail_alloc_descs:
0160     dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
0161  fail_alloc_compls:
0162     free_hw_descs(wq);
0163     return rc;
0164 }
0165 
0166 void idxd_wq_free_resources(struct idxd_wq *wq)
0167 {
0168     struct device *dev = &wq->idxd->pdev->dev;
0169 
0170     if (wq->type != IDXD_WQT_KERNEL)
0171         return;
0172 
0173     free_hw_descs(wq);
0174     free_descs(wq);
0175     dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
0176     sbitmap_queue_free(&wq->sbq);
0177 }
0178 
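/* Issue the ENABLE_WQ device command and mark the WQ enabled on success. */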
0179 int idxd_wq_enable(struct idxd_wq *wq)
0180 {
0181     struct idxd_device *idxd = wq->idxd;
0182     struct device *dev = &idxd->pdev->dev;
0183     u32 status;
0184 
0185     if (wq->state == IDXD_WQ_ENABLED) {
0186         dev_dbg(dev, "WQ %d already enabled\n", wq->id);
0187         return 0;
0188     }
0189 
0190     idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
0191 
0192     if (status != IDXD_CMDSTS_SUCCESS &&
0193         status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
0194         dev_dbg(dev, "WQ enable failed: %#x\n", status);
0195         return -ENXIO;
0196     }
0197 
0198     wq->state = IDXD_WQ_ENABLED;
0199     dev_dbg(dev, "WQ %d enabled\n", wq->id);
0200     return 0;
0201 }
0202 
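/* Issue the DISABLE_WQ device command; optionally reset the WQ's software configuration. */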
0203 int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
0204 {
0205     struct idxd_device *idxd = wq->idxd;
0206     struct device *dev = &idxd->pdev->dev;
0207     u32 status, operand;
0208 
0209     dev_dbg(dev, "Disabling WQ %d\n", wq->id);
0210 
0211     if (wq->state != IDXD_WQ_ENABLED) {
0212         dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
0213         return 0;
0214     }
0215 
0216     operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
0217     idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
0218 
0219     if (status != IDXD_CMDSTS_SUCCESS) {
0220         dev_dbg(dev, "WQ disable failed: %#x\n", status);
0221         return -ENXIO;
0222     }
0223 
0224     if (reset_config)
0225         idxd_wq_disable_cleanup(wq);
0226     wq->state = IDXD_WQ_DISABLED;
0227     dev_dbg(dev, "WQ %d disabled\n", wq->id);
0228     return 0;
0229 }
0230 
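/* Issue the DRAIN_WQ device command for an enabled WQ. */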
0231 void idxd_wq_drain(struct idxd_wq *wq)
0232 {
0233     struct idxd_device *idxd = wq->idxd;
0234     struct device *dev = &idxd->pdev->dev;
0235     u32 operand;
0236 
0237     if (wq->state != IDXD_WQ_ENABLED) {
0238         dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
0239         return;
0240     }
0241 
0242     dev_dbg(dev, "Draining WQ %d\n", wq->id);
0243     operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
0244     idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
0245 }
0246 
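/* Issue the RESET_WQ device command and clear the WQ's software state. */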
0247 void idxd_wq_reset(struct idxd_wq *wq)
0248 {
0249     struct idxd_device *idxd = wq->idxd;
0250     struct device *dev = &idxd->pdev->dev;
0251     u32 operand;
0252 
0253     if (wq->state != IDXD_WQ_ENABLED) {
0254         dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
0255         return;
0256     }
0257 
0258     operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
0259     idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
0260     idxd_wq_disable_cleanup(wq);
0261     wq->state = IDXD_WQ_DISABLED;
0262 }
0263 
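/* Map the WQ's limited portal from the WQ BAR so descriptors can be submitted to it. */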
0264 int idxd_wq_map_portal(struct idxd_wq *wq)
0265 {
0266     struct idxd_device *idxd = wq->idxd;
0267     struct pci_dev *pdev = idxd->pdev;
0268     struct device *dev = &pdev->dev;
0269     resource_size_t start;
0270 
0271     start = pci_resource_start(pdev, IDXD_WQ_BAR);
0272     start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
0273 
0274     wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
0275     if (!wq->portal)
0276         return -ENOMEM;
0277 
0278     return 0;
0279 }
0280 
0281 void idxd_wq_unmap_portal(struct idxd_wq *wq)
0282 {
0283     struct device *dev = &wq->idxd->pdev->dev;
0284 
0285     devm_iounmap(dev, wq->portal);
0286     wq->portal = NULL;
0287     wq->portal_offset = 0;
0288 }
0289 
0290 void idxd_wqs_unmap_portal(struct idxd_device *idxd)
0291 {
0292     int i;
0293 
0294     for (i = 0; i < idxd->max_wqs; i++) {
0295         struct idxd_wq *wq = idxd->wqs[i];
0296 
0297         if (wq->portal)
0298             idxd_wq_unmap_portal(wq);
0299     }
0300 }
0301 
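/*
 * The two helpers below update a single 32-bit word of the WQCFG register
 * and the driver's shadow copy under the device lock.
 */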
0302 static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
0303 {
0304     struct idxd_device *idxd = wq->idxd;
0305     union wqcfg wqcfg;
0306     unsigned int offset;
0307 
0308     offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
0309     spin_lock(&idxd->dev_lock);
0310     wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
0311     wqcfg.priv = priv;
0312     wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
0313     iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
0314     spin_unlock(&idxd->dev_lock);
0315 }
0316 
0317 static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
0318 {
0319     struct idxd_device *idxd = wq->idxd;
0320     union wqcfg wqcfg;
0321     unsigned int offset;
0322 
0323     offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
0324     spin_lock(&idxd->dev_lock);
0325     wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
0326     wqcfg.pasid_en = 1;
0327     wqcfg.pasid = pasid;
0328     wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
0329     iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
0330     spin_unlock(&idxd->dev_lock);
0331 }
0332 
0333 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
0334 {
0335     int rc;
0336 
0337     rc = idxd_wq_disable(wq, false);
0338     if (rc < 0)
0339         return rc;
0340 
0341     __idxd_wq_set_pasid_locked(wq, pasid);
0342 
0343     rc = idxd_wq_enable(wq);
0344     if (rc < 0)
0345         return rc;
0346 
0347     return 0;
0348 }
0349 
0350 int idxd_wq_disable_pasid(struct idxd_wq *wq)
0351 {
0352     struct idxd_device *idxd = wq->idxd;
0353     int rc;
0354     union wqcfg wqcfg;
0355     unsigned int offset;
0356 
0357     rc = idxd_wq_disable(wq, false);
0358     if (rc < 0)
0359         return rc;
0360 
0361     offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
0362     spin_lock(&idxd->dev_lock);
0363     wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
0364     wqcfg.pasid_en = 0;
0365     wqcfg.pasid = 0;
0366     iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
0367     spin_unlock(&idxd->dev_lock);
0368 
0369     rc = idxd_wq_enable(wq);
0370     if (rc < 0)
0371         return rc;
0372 
0373     return 0;
0374 }
0375 
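/* Reset the WQ's software configuration back to driver defaults. */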
0376 static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
0377 {
0378     struct idxd_device *idxd = wq->idxd;
0379 
0380     lockdep_assert_held(&wq->wq_lock);
0381     memset(wq->wqcfg, 0, idxd->wqcfg_size);
0382     wq->type = IDXD_WQT_NONE;
0383     wq->threshold = 0;
0384     wq->priority = 0;
0385     wq->ats_dis = 0;
0386     wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
0387     clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
0388     clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
0389     memset(wq->name, 0, WQ_NAME_SIZE);
0390     wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
0391     wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
0392 }
0393 
0394 static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
0395 {
0396     lockdep_assert_held(&wq->wq_lock);
0397 
0398     wq->size = 0;
0399     wq->group = NULL;
0400 }
0401 
0402 static void idxd_wq_ref_release(struct percpu_ref *ref)
0403 {
0404     struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
0405 
0406     complete(&wq->wq_dead);
0407 }
0408 
0409 int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
0410 {
0411     int rc;
0412 
0413     memset(&wq->wq_active, 0, sizeof(wq->wq_active));
0414     rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
0415                  PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
0416     if (rc < 0)
0417         return rc;
0418     reinit_completion(&wq->wq_dead);
0419     reinit_completion(&wq->wq_resurrect);
0420     return 0;
0421 }
0422 
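/*
 * Kill the WQ's percpu reference and wait for all outstanding users to
 * drop their references before returning.
 */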
0423 void __idxd_wq_quiesce(struct idxd_wq *wq)
0424 {
0425     lockdep_assert_held(&wq->wq_lock);
0426     reinit_completion(&wq->wq_resurrect);
0427     percpu_ref_kill(&wq->wq_active);
0428     complete_all(&wq->wq_resurrect);
0429     wait_for_completion(&wq->wq_dead);
0430 }
0431 
0432 void idxd_wq_quiesce(struct idxd_wq *wq)
0433 {
0434     mutex_lock(&wq->wq_lock);
0435     __idxd_wq_quiesce(wq);
0436     mutex_unlock(&wq->wq_lock);
0437 }
0438 
0439 /* Device control bits */
0440 static inline bool idxd_is_enabled(struct idxd_device *idxd)
0441 {
0442     union gensts_reg gensts;
0443 
0444     gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
0445 
0446     if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
0447         return true;
0448     return false;
0449 }
0450 
0451 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
0452 {
0453     union gensts_reg gensts;
0454 
0455     gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
0456 
0457     return (gensts.state == IDXD_DEVICE_STATE_HALT);
0458 }
0459 
0460 /*
0461  * This function is only used for reset during probe and will
0462  * poll for completion. Once the device is set up with interrupts,
0463  * all commands will be done via interrupt completion.
0464  */
0465 int idxd_device_init_reset(struct idxd_device *idxd)
0466 {
0467     struct device *dev = &idxd->pdev->dev;
0468     union idxd_command_reg cmd;
0469 
0470     if (idxd_device_is_halted(idxd)) {
0471         dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
0472         return -ENXIO;
0473     }
0474 
0475     memset(&cmd, 0, sizeof(cmd));
0476     cmd.cmd = IDXD_CMD_RESET_DEVICE;
0477     dev_dbg(dev, "%s: sending reset for init.\n", __func__);
0478     spin_lock(&idxd->cmd_lock);
0479     iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
0480 
0481     while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
0482            IDXD_CMDSTS_ACTIVE)
0483         cpu_relax();
0484     spin_unlock(&idxd->cmd_lock);
0485     return 0;
0486 }
0487 
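/*
 * Submit a device command and sleep until the command-completion interrupt
 * fires. Only one command may be outstanding at a time; callers serialize
 * on cmd_waitq and IDXD_FLAG_CMD_RUNNING.
 */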
0488 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
0489               u32 *status)
0490 {
0491     union idxd_command_reg cmd;
0492     DECLARE_COMPLETION_ONSTACK(done);
0493     u32 stat;
0494 
0495     if (idxd_device_is_halted(idxd)) {
0496         dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
0497         if (status)
0498             *status = IDXD_CMDSTS_HW_ERR;
0499         return;
0500     }
0501 
0502     memset(&cmd, 0, sizeof(cmd));
0503     cmd.cmd = cmd_code;
0504     cmd.operand = operand;
0505     cmd.int_req = 1;
0506 
0507     spin_lock(&idxd->cmd_lock);
0508     wait_event_lock_irq(idxd->cmd_waitq,
0509                 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
0510                 idxd->cmd_lock);
0511 
0512     dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
0513         __func__, cmd_code, operand);
0514 
0515     idxd->cmd_status = 0;
0516     __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
0517     idxd->cmd_done = &done;
0518     iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
0519 
0520     /*
0521      * After command submitted, release lock and go to sleep until
0522      * the command completes via interrupt.
0523      */
0524     spin_unlock(&idxd->cmd_lock);
0525     wait_for_completion(&done);
0526     stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
0527     spin_lock(&idxd->cmd_lock);
0528     if (status)
0529         *status = stat;
0530     idxd->cmd_status = stat & GENMASK(7, 0);
0531 
0532     __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
0533     /* Wake up other pending commands */
0534     wake_up(&idxd->cmd_waitq);
0535     spin_unlock(&idxd->cmd_lock);
0536 }
0537 
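/* Issue the ENABLE_DEVICE command and mark the device enabled on success. */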
0538 int idxd_device_enable(struct idxd_device *idxd)
0539 {
0540     struct device *dev = &idxd->pdev->dev;
0541     u32 status;
0542 
0543     if (idxd_is_enabled(idxd)) {
0544         dev_dbg(dev, "Device already enabled\n");
0545         return -ENXIO;
0546     }
0547 
0548     idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
0549 
0550     /* Succeed if the command completed or if the device was already enabled */
0551     if (status != IDXD_CMDSTS_SUCCESS &&
0552         status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
0553         dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
0554         return -ENXIO;
0555     }
0556 
0557     idxd->state = IDXD_DEV_ENABLED;
0558     return 0;
0559 }
0560 
0561 int idxd_device_disable(struct idxd_device *idxd)
0562 {
0563     struct device *dev = &idxd->pdev->dev;
0564     u32 status;
0565 
0566     if (!idxd_is_enabled(idxd)) {
0567         dev_dbg(dev, "Device is not enabled\n");
0568         return 0;
0569     }
0570 
0571     idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
0572 
0573     /* Succeed if the command completed or if the device was already disabled */
0574     if (status != IDXD_CMDSTS_SUCCESS &&
0575         !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
0576         dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
0577         return -ENXIO;
0578     }
0579 
0580     idxd_device_clear_state(idxd);
0581     return 0;
0582 }
0583 
0584 void idxd_device_reset(struct idxd_device *idxd)
0585 {
0586     idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
0587     idxd_device_clear_state(idxd);
0588     spin_lock(&idxd->dev_lock);
0589     idxd_unmask_error_interrupts(idxd);
0590     spin_unlock(&idxd->dev_lock);
0591 }
0592 
0593 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
0594 {
0595     struct device *dev = &idxd->pdev->dev;
0596     u32 operand;
0597 
0598     operand = pasid;
0599     dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
0600     idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
0601     dev_dbg(dev, "pasid %d drained\n", pasid);
0602 }
0603 
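/* Request an interrupt handle from the device for the given vector index (MSI-X or IMS). */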
0604 int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
0605                    enum idxd_interrupt_type irq_type)
0606 {
0607     struct device *dev = &idxd->pdev->dev;
0608     u32 operand, status;
0609 
0610     if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
0611         return -EOPNOTSUPP;
0612 
0613     dev_dbg(dev, "get int handle, idx %d\n", idx);
0614 
0615     operand = idx & GENMASK(15, 0);
0616     if (irq_type == IDXD_IRQ_IMS)
0617         operand |= CMD_INT_HANDLE_IMS;
0618 
0619     dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);
0620 
0621     idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
0622 
0623     if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
0624         dev_dbg(dev, "request int handle failed: %#x\n", status);
0625         return -ENXIO;
0626     }
0627 
0628     *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
0629 
0630     dev_dbg(dev, "int handle acquired: %u\n", *handle);
0631     return 0;
0632 }
0633 
0634 int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
0635                    enum idxd_interrupt_type irq_type)
0636 {
0637     struct device *dev = &idxd->pdev->dev;
0638     u32 operand, status;
0639     union idxd_command_reg cmd;
0640 
0641     if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
0642         return -EOPNOTSUPP;
0643 
0644     dev_dbg(dev, "release int handle, handle %d\n", handle);
0645 
0646     memset(&cmd, 0, sizeof(cmd));
0647     operand = handle & GENMASK(15, 0);
0648 
0649     if (irq_type == IDXD_IRQ_IMS)
0650         operand |= CMD_INT_HANDLE_IMS;
0651 
0652     cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
0653     cmd.operand = operand;
0654 
0655     dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
0656 
0657     spin_lock(&idxd->cmd_lock);
0658     iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
0659 
0660     while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
0661         cpu_relax();
0662     status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
0663     spin_unlock(&idxd->cmd_lock);
0664 
0665     if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
0666         dev_dbg(dev, "release int handle failed: %#x\n", status);
0667         return -ENXIO;
0668     }
0669 
0670     dev_dbg(dev, "int handle released.\n");
0671     return 0;
0672 }
0673 
0674 /* Device configuration bits */
0675 static void idxd_engines_clear_state(struct idxd_device *idxd)
0676 {
0677     struct idxd_engine *engine;
0678     int i;
0679 
0680     lockdep_assert_held(&idxd->dev_lock);
0681     for (i = 0; i < idxd->max_engines; i++) {
0682         engine = idxd->engines[i];
0683         engine->group = NULL;
0684     }
0685 }
0686 
0687 static void idxd_groups_clear_state(struct idxd_device *idxd)
0688 {
0689     struct idxd_group *group;
0690     int i;
0691 
0692     lockdep_assert_held(&idxd->dev_lock);
0693     for (i = 0; i < idxd->max_groups; i++) {
0694         group = idxd->groups[i];
0695         memset(&group->grpcfg, 0, sizeof(group->grpcfg));
0696         group->num_engines = 0;
0697         group->num_wqs = 0;
0698         group->use_rdbuf_limit = false;
0699         group->rdbufs_allowed = 0;
0700         group->rdbufs_reserved = 0;
0701         if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
0702             group->tc_a = 1;
0703             group->tc_b = 1;
0704         } else {
0705             group->tc_a = -1;
0706             group->tc_b = -1;
0707         }
0708     }
0709 }
0710 
0711 static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
0712 {
0713     int i;
0714 
0715     for (i = 0; i < idxd->max_wqs; i++) {
0716         struct idxd_wq *wq = idxd->wqs[i];
0717 
0718         mutex_lock(&wq->wq_lock);
0719         idxd_wq_disable_cleanup(wq);
0720         idxd_wq_device_reset_cleanup(wq);
0721         mutex_unlock(&wq->wq_lock);
0722     }
0723 }
0724 
0725 void idxd_device_clear_state(struct idxd_device *idxd)
0726 {
0727     if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
0728         return;
0729 
0730     idxd_device_wqs_clear_state(idxd);
0731     spin_lock(&idxd->dev_lock);
0732     idxd_groups_clear_state(idxd);
0733     idxd_engines_clear_state(idxd);
0734     idxd->state = IDXD_DEV_DISABLED;
0735     spin_unlock(&idxd->dev_lock);
0736 }
0737 
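/* Write one group's GRPWQCFG, GRPENGCFG, and GRPFLAGS registers from the shadow grpcfg. */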
0738 static void idxd_group_config_write(struct idxd_group *group)
0739 {
0740     struct idxd_device *idxd = group->idxd;
0741     struct device *dev = &idxd->pdev->dev;
0742     int i;
0743     u32 grpcfg_offset;
0744 
0745     dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
0746 
0747     /* setup GRPWQCFG */
0748     for (i = 0; i < GRPWQCFG_STRIDES; i++) {
0749         grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
0750         iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
0751         dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
0752             group->id, i, grpcfg_offset,
0753             ioread64(idxd->reg_base + grpcfg_offset));
0754     }
0755 
0756     /* setup GRPENGCFG */
0757     grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
0758     iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
0759     dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
0760         grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
0761 
0762     /* setup GRPFLAGS */
0763     grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
0764     iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
0765     dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
0766         group->id, grpcfg_offset,
0767         ioread32(idxd->reg_base + grpcfg_offset));
0768 }
0769 
0770 static int idxd_groups_config_write(struct idxd_device *idxd)
0771 
0772 {
0773     union gencfg_reg reg;
0774     int i;
0775     struct device *dev = &idxd->pdev->dev;
0776 
0777     /* Setup bandwidth rdbuf limit */
0778     if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
0779         reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
0780         reg.rdbuf_limit = idxd->rdbuf_limit;
0781         iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
0782     }
0783 
0784     dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
0785         ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
0786 
0787     for (i = 0; i < idxd->max_groups; i++) {
0788         struct idxd_group *group = idxd->groups[i];
0789 
0790         idxd_group_config_write(group);
0791     }
0792 
0793     return 0;
0794 }
0795 
0796 static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
0797 {
0798     struct pci_dev *pdev = idxd->pdev;
0799 
0800     if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
0801         return true;
0802     return false;
0803 }
0804 
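/* Build the WQCFG shadow from the WQ's software settings and write it to the device. */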
0805 static int idxd_wq_config_write(struct idxd_wq *wq)
0806 {
0807     struct idxd_device *idxd = wq->idxd;
0808     struct device *dev = &idxd->pdev->dev;
0809     u32 wq_offset;
0810     int i;
0811 
0812     if (!wq->group)
0813         return 0;
0814 
0815     /*
0816      * Instead of memsetting the entire shadow copy of WQCFG, copy from the hardware after
0817      * wq reset. This will copy back the sticky values that are present on some devices.
0818      */
0819     for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
0820         wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
0821         wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
0822     }
0823 
0824     if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
0825         wq->size = WQ_DEFAULT_QUEUE_DEPTH;
0826 
0827     /* byte 0-3 */
0828     wq->wqcfg->wq_size = wq->size;
0829 
0830     /* bytes 4-7 */
0831     wq->wqcfg->wq_thresh = wq->threshold;
0832 
0833     /* byte 8-11 */
0834     if (wq_dedicated(wq))
0835         wq->wqcfg->mode = 1;
0836 
0837     /*
0838      * The WQ priv bit is set depending on the WQ type. priv = 1 if the
0839      * WQ type is kernel to indicate privileged access. This setting only
0840      * matters for dedicated WQ. According to the DSA spec:
0841      * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
0842      * Privileged Mode Enable field of the PCI Express PASID capability
0843      * is 0, this field must be 0.
0844      *
0845      * In the case of a dedicated kernel WQ that is not able to support
0846      * the PASID cap, then the configuration will be rejected.
0847      */
0848     if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
0849         !idxd_device_pasid_priv_enabled(idxd) &&
0850         wq->type == IDXD_WQT_KERNEL) {
0851         idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
0852         return -EOPNOTSUPP;
0853     }
0854 
0855     wq->wqcfg->priority = wq->priority;
0856 
0857     if (idxd->hw.gen_cap.block_on_fault &&
0858         test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
0859         wq->wqcfg->bof = 1;
0860 
0861     if (idxd->hw.wq_cap.wq_ats_support)
0862         wq->wqcfg->wq_ats_disable = wq->ats_dis;
0863 
0864     /* bytes 12-15 */
0865     wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
0866     wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
0867 
0868     dev_dbg(dev, "WQ %d CFGs\n", wq->id);
0869     for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
0870         wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
0871         iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
0872         dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
0873             wq->id, i, wq_offset,
0874             ioread32(idxd->reg_base + wq_offset));
0875     }
0876 
0877     return 0;
0878 }
0879 
0880 static int idxd_wqs_config_write(struct idxd_device *idxd)
0881 {
0882     int i, rc;
0883 
0884     for (i = 0; i < idxd->max_wqs; i++) {
0885         struct idxd_wq *wq = idxd->wqs[i];
0886 
0887         rc = idxd_wq_config_write(wq);
0888         if (rc < 0)
0889             return rc;
0890     }
0891 
0892     return 0;
0893 }
0894 
0895 static void idxd_group_flags_setup(struct idxd_device *idxd)
0896 {
0897     int i;
0898 
0899     /* TC-A 0 and TC-B 1 should be defaults */
0900     for (i = 0; i < idxd->max_groups; i++) {
0901         struct idxd_group *group = idxd->groups[i];
0902 
0903         if (group->tc_a == -1)
0904             group->tc_a = group->grpcfg.flags.tc_a = 0;
0905         else
0906             group->grpcfg.flags.tc_a = group->tc_a;
0907         if (group->tc_b == -1)
0908             group->tc_b = group->grpcfg.flags.tc_b = 1;
0909         else
0910             group->grpcfg.flags.tc_b = group->tc_b;
0911         group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
0912         group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
0913         if (group->rdbufs_allowed)
0914             group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
0915         else
0916             group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
0917     }
0918 }
0919 
0920 static int idxd_engines_setup(struct idxd_device *idxd)
0921 {
0922     int i, engines = 0;
0923     struct idxd_engine *eng;
0924     struct idxd_group *group;
0925 
0926     for (i = 0; i < idxd->max_groups; i++) {
0927         group = idxd->groups[i];
0928         group->grpcfg.engines = 0;
0929     }
0930 
0931     for (i = 0; i < idxd->max_engines; i++) {
0932         eng = idxd->engines[i];
0933         group = eng->group;
0934 
0935         if (!group)
0936             continue;
0937 
0938         group->grpcfg.engines |= BIT(eng->id);
0939         engines++;
0940     }
0941 
0942     if (!engines)
0943         return -EINVAL;
0944 
0945     return 0;
0946 }
0947 
0948 static int idxd_wqs_setup(struct idxd_device *idxd)
0949 {
0950     struct idxd_wq *wq;
0951     struct idxd_group *group;
0952     int i, j, configured = 0;
0953     struct device *dev = &idxd->pdev->dev;
0954 
0955     for (i = 0; i < idxd->max_groups; i++) {
0956         group = idxd->groups[i];
0957         for (j = 0; j < 4; j++)
0958             group->grpcfg.wqs[j] = 0;
0959     }
0960 
0961     for (i = 0; i < idxd->max_wqs; i++) {
0962         wq = idxd->wqs[i];
0963         group = wq->group;
0964 
0965         if (!wq->group)
0966             continue;
0967 
0968         if (wq_shared(wq) && !wq_shared_supported(wq)) {
0969             idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
0970             dev_warn(dev, "No shared wq support but configured.\n");
0971             return -EINVAL;
0972         }
0973 
0974         group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
0975         configured++;
0976     }
0977 
0978     if (configured == 0) {
0979         idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
0980         return -EINVAL;
0981     }
0982 
0983     return 0;
0984 }
0985 
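/*
 * Program the full device configuration: set up WQs, engines, and group
 * flags, then write the WQ and group config registers.
 */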
0986 int idxd_device_config(struct idxd_device *idxd)
0987 {
0988     int rc;
0989 
0990     lockdep_assert_held(&idxd->dev_lock);
0991     rc = idxd_wqs_setup(idxd);
0992     if (rc < 0)
0993         return rc;
0994 
0995     rc = idxd_engines_setup(idxd);
0996     if (rc < 0)
0997         return rc;
0998 
0999     idxd_group_flags_setup(idxd);
1000 
1001     rc = idxd_wqs_config_write(idxd);
1002     if (rc < 0)
1003         return rc;
1004 
1005     rc = idxd_groups_config_write(idxd);
1006     if (rc < 0)
1007         return rc;
1008 
1009     return 0;
1010 }
1011 
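/* Read a WQ's configuration back from the device registers into the driver's shadow copy. */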
1012 static int idxd_wq_load_config(struct idxd_wq *wq)
1013 {
1014     struct idxd_device *idxd = wq->idxd;
1015     struct device *dev = &idxd->pdev->dev;
1016     int wqcfg_offset;
1017     int i;
1018 
1019     wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
1020     memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
1021 
1022     wq->size = wq->wqcfg->wq_size;
1023     wq->threshold = wq->wqcfg->wq_thresh;
1024 
1025     /* The driver does not support shared WQ mode in read-only config yet */
1026     if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
1027         return -EOPNOTSUPP;
1028 
1029     set_bit(WQ_FLAG_DEDICATED, &wq->flags);
1030 
1031     wq->priority = wq->wqcfg->priority;
1032 
1033     wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
1034     wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
1035 
1036     for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
1037         wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
1038         dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
1039     }
1040 
1041     return 0;
1042 }
1043 
1044 static void idxd_group_load_config(struct idxd_group *group)
1045 {
1046     struct idxd_device *idxd = group->idxd;
1047     struct device *dev = &idxd->pdev->dev;
1048     int i, j, grpcfg_offset;
1049 
1050     /*
1051      * Load WQS bit fields
1052      * Iterate through all 256 bits, 64 bits at a time
1053      */
1054     for (i = 0; i < GRPWQCFG_STRIDES; i++) {
1055         struct idxd_wq *wq;
1056 
1057         grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
1058         group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
1059         dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
1060             group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
1061 
1062         if (i * 64 >= idxd->max_wqs)
1063             break;
1064 
1065         /* Iterate through all 64 bits and check for wq set */
1066         for (j = 0; j < 64; j++) {
1067             int id = i * 64 + j;
1068 
1069             /* No need to check beyond max wqs */
1070             if (id >= idxd->max_wqs)
1071                 break;
1072 
1073             /* Set group assignment for wq if wq bit is set */
1074             if (group->grpcfg.wqs[i] & BIT(j)) {
1075                 wq = idxd->wqs[id];
1076                 wq->group = group;
1077             }
1078         }
1079     }
1080 
1081     grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
1082     group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
1083     dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
1084         grpcfg_offset, group->grpcfg.engines);
1085 
1086     /* Iterate through all 64 bits to check engines set */
1087     for (i = 0; i < 64; i++) {
1088         if (i >= idxd->max_engines)
1089             break;
1090 
1091         if (group->grpcfg.engines & BIT(i)) {
1092             struct idxd_engine *engine = idxd->engines[i];
1093 
1094             engine->group = group;
1095         }
1096     }
1097 
1098     grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
1099     group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
1100     dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
1101         group->id, grpcfg_offset, group->grpcfg.flags.bits);
1102 }
1103 
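/* Read the current device configuration back from hardware into the driver's software state. */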
1104 int idxd_device_load_config(struct idxd_device *idxd)
1105 {
1106     union gencfg_reg reg;
1107     int i, rc;
1108 
1109     reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
1110     idxd->rdbuf_limit = reg.rdbuf_limit;
1111 
1112     for (i = 0; i < idxd->max_groups; i++) {
1113         struct idxd_group *group = idxd->groups[i];
1114 
1115         idxd_group_load_config(group);
1116     }
1117 
1118     for (i = 0; i < idxd->max_wqs; i++) {
1119         struct idxd_wq *wq = idxd->wqs[i];
1120 
1121         rc = idxd_wq_load_config(wq);
1122         if (rc < 0)
1123             return rc;
1124     }
1125 
1126     return 0;
1127 }
1128 
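/*
 * Move any pending and in-progress descriptors off the interrupt entry's
 * lists and complete them, aborting those that never finished.
 */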
1129 static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
1130 {
1131     struct idxd_desc *desc, *itr;
1132     struct llist_node *head;
1133     LIST_HEAD(flist);
1134     enum idxd_complete_type ctype;
1135 
1136     spin_lock(&ie->list_lock);
1137     head = llist_del_all(&ie->pending_llist);
1138     if (head) {
1139         llist_for_each_entry_safe(desc, itr, head, llnode)
1140             list_add_tail(&desc->list, &ie->work_list);
1141     }
1142 
1143     list_for_each_entry_safe(desc, itr, &ie->work_list, list)
1144         list_move_tail(&desc->list, &flist);
1145     spin_unlock(&ie->list_lock);
1146 
1147     list_for_each_entry_safe(desc, itr, &flist, list) {
1148         list_del(&desc->list);
1149         ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
1150         idxd_dma_complete_txd(desc, ctype, true);
1151     }
1152 }
1153 
1154 static void idxd_device_set_perm_entry(struct idxd_device *idxd,
1155                        struct idxd_irq_entry *ie)
1156 {
1157     union msix_perm mperm;
1158 
1159     if (ie->pasid == INVALID_IOASID)
1160         return;
1161 
1162     mperm.bits = 0;
1163     mperm.pasid = ie->pasid;
1164     mperm.pasid_en = 1;
1165     iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
1166 }
1167 
1168 static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
1169                      struct idxd_irq_entry *ie)
1170 {
1171     iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
1172 }
1173 
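/*
 * Tear down a kernel WQ's interrupt: free the IRQ, flush pending
 * descriptors, and release the interrupt handle and MSIX permission entry.
 */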
1174 void idxd_wq_free_irq(struct idxd_wq *wq)
1175 {
1176     struct idxd_device *idxd = wq->idxd;
1177     struct idxd_irq_entry *ie = &wq->ie;
1178 
1179     if (wq->type != IDXD_WQT_KERNEL)
1180         return;
1181 
1182     free_irq(ie->vector, ie);
1183     idxd_flush_pending_descs(ie);
1184     if (idxd->request_int_handles)
1185         idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
1186     idxd_device_clear_perm_entry(idxd, ie);
1187     ie->vector = -1;
1188     ie->int_handle = INVALID_INT_HANDLE;
1189     ie->pasid = INVALID_IOASID;
1190 }
1191 
1192 int idxd_wq_request_irq(struct idxd_wq *wq)
1193 {
1194     struct idxd_device *idxd = wq->idxd;
1195     struct pci_dev *pdev = idxd->pdev;
1196     struct device *dev = &pdev->dev;
1197     struct idxd_irq_entry *ie;
1198     int rc;
1199 
1200     if (wq->type != IDXD_WQT_KERNEL)
1201         return 0;
1202 
1203     ie = &wq->ie;
1204     ie->vector = pci_irq_vector(pdev, ie->id);
1205     ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
1206     idxd_device_set_perm_entry(idxd, ie);
1207 
1208     rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
1209     if (rc < 0) {
1210         dev_err(dev, "Failed to request irq %d.\n", ie->vector);
1211         goto err_irq;
1212     }
1213 
1214     if (idxd->request_int_handles) {
1215         rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
1216                             IDXD_IRQ_MSIX);
1217         if (rc < 0)
1218             goto err_int_handle;
1219     } else {
1220         ie->int_handle = ie->id;
1221     }
1222 
1223     return 0;
1224 
1225 err_int_handle:
1226     ie->int_handle = INVALID_INT_HANDLE;
1227     free_irq(ie->vector, ie);
1228 err_irq:
1229     idxd_device_clear_perm_entry(idxd, ie);
1230     ie->pasid = INVALID_IOASID;
1231     return rc;
1232 }
1233 
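/*
 * Driver-side WQ enable path: validate the configuration, program the
 * device, enable the WQ, map its portal, and set up its IRQ and resources.
 */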
1234 int drv_enable_wq(struct idxd_wq *wq)
1235 {
1236     struct idxd_device *idxd = wq->idxd;
1237     struct device *dev = &idxd->pdev->dev;
1238     int rc = -ENXIO;
1239 
1240     lockdep_assert_held(&wq->wq_lock);
1241 
1242     if (idxd->state != IDXD_DEV_ENABLED) {
1243         idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
1244         goto err;
1245     }
1246 
1247     if (wq->state != IDXD_WQ_DISABLED) {
1248         dev_dbg(dev, "wq %d already enabled.\n", wq->id);
1249         idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
1250         rc = -EBUSY;
1251         goto err;
1252     }
1253 
1254     if (!wq->group) {
1255         dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
1256         idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
1257         goto err;
1258     }
1259 
1260     if (strlen(wq->name) == 0) {
1261         idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
1262         dev_dbg(dev, "wq %d name not set.\n", wq->id);
1263         goto err;
1264     }
1265 
1266     /* Shared WQ checks */
1267     if (wq_shared(wq)) {
1268         if (!wq_shared_supported(wq)) {
1269             idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
1270             dev_dbg(dev, "PASID not enabled and shared wq.\n");
1271             goto err;
1272         }
1273         /*
1274          * A shared wq with the threshold set to 0 means the user
1275          * either did not set the threshold or transitioned from a
1276          * dedicated wq without setting one. A value
1277          * of 0 would effectively disable the shared wq. The
1278          * driver does not allow a value of 0 to be set for
1279          * threshold via sysfs.
1280          */
1281         if (wq->threshold == 0) {
1282             idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
1283             dev_dbg(dev, "Shared wq and threshold 0.\n");
1284             goto err;
1285         }
1286     }
1287 
1288     /*
1289      * In the event that the WQ is configurable for the pasid and priv bits:
1290      * for a kernel wq, the driver should set up the pasid, pasid_en, and priv bits.
1291      * However, for a non-kernel wq, the driver should only set the pasid_en bit for
1292      * a shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
1293      * pasid_en later on, so there is no need to set them up here.
1294      */
1295     if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
1296         int priv = 0;
1297 
1298         if (wq_pasid_enabled(wq)) {
1299             if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
1300                 u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
1301 
1302                 __idxd_wq_set_pasid_locked(wq, pasid);
1303             }
1304         }
1305 
1306         if (is_idxd_wq_kernel(wq))
1307             priv = 1;
1308         __idxd_wq_set_priv_locked(wq, priv);
1309     }
1310 
1311     rc = 0;
1312     spin_lock(&idxd->dev_lock);
1313     if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1314         rc = idxd_device_config(idxd);
1315     spin_unlock(&idxd->dev_lock);
1316     if (rc < 0) {
1317         dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
1318         goto err;
1319     }
1320 
1321     rc = idxd_wq_enable(wq);
1322     if (rc < 0) {
1323         dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
1324         goto err;
1325     }
1326 
1327     rc = idxd_wq_map_portal(wq);
1328     if (rc < 0) {
1329         idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
1330         dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
1331         goto err_map_portal;
1332     }
1333 
1334     wq->client_count = 0;
1335 
1336     rc = idxd_wq_request_irq(wq);
1337     if (rc < 0) {
1338         idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
1339         dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
1340         goto err_irq;
1341     }
1342 
1343     rc = idxd_wq_alloc_resources(wq);
1344     if (rc < 0) {
1345         idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
1346         dev_dbg(dev, "WQ resource alloc failed\n");
1347         goto err_res_alloc;
1348     }
1349 
1350     rc = idxd_wq_init_percpu_ref(wq);
1351     if (rc < 0) {
1352         idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
1353         dev_dbg(dev, "percpu_ref setup failed\n");
1354         goto err_ref;
1355     }
1356 
1357     return 0;
1358 
1359 err_ref:
1360     idxd_wq_free_resources(wq);
1361 err_res_alloc:
1362     idxd_wq_free_irq(wq);
1363 err_irq:
1364     idxd_wq_unmap_portal(wq);
1365 err_map_portal:
1366     rc = idxd_wq_disable(wq, false);
1367     if (rc < 0)
1368         dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
1369 err:
1370     return rc;
1371 }
1372 
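/*
 * Driver-side WQ disable path: release resources, unmap the portal, drain
 * the WQ, free its IRQ, and reset it.
 */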
1373 void drv_disable_wq(struct idxd_wq *wq)
1374 {
1375     struct idxd_device *idxd = wq->idxd;
1376     struct device *dev = &idxd->pdev->dev;
1377 
1378     lockdep_assert_held(&wq->wq_lock);
1379 
1380     if (idxd_wq_refcount(wq))
1381         dev_warn(dev, "Clients have a claim on wq %d: %d\n",
1382              wq->id, idxd_wq_refcount(wq));
1383 
1384     idxd_wq_free_resources(wq);
1385     idxd_wq_unmap_portal(wq);
1386     idxd_wq_drain(wq);
1387     idxd_wq_free_irq(wq);
1388     idxd_wq_reset(wq);
1389     percpu_ref_exit(&wq->wq_active);
1390     wq->type = IDXD_WQT_NONE;
1391     wq->client_count = 0;
1392 }
1393 
1394 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
1395 {
1396     struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1397     int rc = 0;
1398 
1399     /*
1400      * Device should be in disabled state for the idxd_drv to load. If it's in
1401      * enabled state, then the device was altered outside of driver's control.
1402      * If the device is in the halted state, we don't want to proceed.
1403      */
1404     if (idxd->state != IDXD_DEV_DISABLED) {
1405         idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
1406         return -ENXIO;
1407     }
1408 
1409     /* Device configuration */
1410     spin_lock(&idxd->dev_lock);
1411     if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1412         rc = idxd_device_config(idxd);
1413     spin_unlock(&idxd->dev_lock);
1414     if (rc < 0)
1415         return -ENXIO;
1416 
1417     /* Start device */
1418     rc = idxd_device_enable(idxd);
1419     if (rc < 0)
1420         return rc;
1421 
1422     /* Setup DMA device without channels */
1423     rc = idxd_register_dma_device(idxd);
1424     if (rc < 0) {
1425         idxd_device_disable(idxd);
1426         idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
1427         return rc;
1428     }
1429 
1430     idxd->cmd_status = 0;
1431     return 0;
1432 }
1433 
1434 void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
1435 {
1436     struct device *dev = &idxd_dev->conf_dev;
1437     struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1438     int i;
1439 
1440     for (i = 0; i < idxd->max_wqs; i++) {
1441         struct idxd_wq *wq = idxd->wqs[i];
1442         struct device *wq_dev = wq_confdev(wq);
1443 
1444         if (wq->state == IDXD_WQ_DISABLED)
1445             continue;
1446         dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
1447         device_release_driver(wq_dev);
1448     }
1449 
1450     idxd_unregister_dma_device(idxd);
1451     idxd_device_disable(idxd);
1452     if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1453         idxd_device_reset(idxd);
1454 }
1455 
1456 static enum idxd_dev_type dev_types[] = {
1457     IDXD_DEV_DSA,
1458     IDXD_DEV_IAX,
1459     IDXD_DEV_NONE,
1460 };
1461 
1462 struct idxd_device_driver idxd_drv = {
1463     .type = dev_types,
1464     .probe = idxd_device_drv_probe,
1465     .remove = idxd_device_drv_remove,
1466     .name = "idxd",
1467 };
1468 EXPORT_SYMBOL_GPL(idxd_drv);