// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "ptdma.h"

/* Human-readable error strings */
static char *pt_error_codes[] = {
    "",
    "ERR 01: ILLEGAL_ENGINE",
    "ERR 03: ILLEGAL_FUNCTION_TYPE",
    "ERR 04: ILLEGAL_FUNCTION_MODE",
    "ERR 06: ILLEGAL_FUNCTION_SIZE",
    "ERR 08: ILLEGAL_FUNCTION_RSVD",
    "ERR 09: ILLEGAL_BUFFER_LENGTH",
    "ERR 10: VLSB_FAULT",
    "ERR 11: ILLEGAL_MEM_ADDR",
    "ERR 12: ILLEGAL_MEM_SEL",
    "ERR 13: ILLEGAL_CONTEXT_ID",
    "ERR 15: 0xF Reserved",
    "ERR 18: CMD_TIMEOUT",
    "ERR 19: IDMA0_AXI_SLVERR",
    "ERR 20: IDMA0_AXI_DECERR",
    "ERR 21: 0x15 Reserved",
    "ERR 22: IDMA1_AXI_SLVERR",
    "ERR 23: IDMA1_AXI_DECERR",
    "ERR 24: 0x18 Reserved",
    "ERR 27: 0x1B Reserved",
    "ERR 38: ODMA0_AXI_SLVERR",
    "ERR 39: ODMA0_AXI_DECERR",
    "ERR 40: 0x28 Reserved",
    "ERR 41: ODMA1_AXI_SLVERR",
    "ERR 42: ODMA1_AXI_DECERR",
    "ERR 43: LSB_PARITY_ERR",
};

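/*
 * Report a queue error. The error code is expected to be the value extracted
 * from the queue status register (see pt_check_status_trans()); it doubles as
 * the index into pt_error_codes[] above.
 */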
static void pt_log_error(struct pt_device *d, int e)
{
    dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
}

void pt_start_queue(struct pt_cmd_queue *cmd_q)
{
    /* Turn on the run bit */
    iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
}

void pt_stop_queue(struct pt_cmd_queue *cmd_q)
{
    /* Turn off the run bit */
    iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
}

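/*
 * Submit one descriptor to the hardware queue: copy it into the ring at the
 * current index, advance the index, write the new tail address to the device
 * and make sure the queue is running. A stop-on-completion request is turned
 * into an interrupt-on-completion before the descriptor is queued.
 */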
static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
{
    bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
    u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
    u32 tail;

    if (soc) {
        desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
        desc->dw0 &= ~DWORD0_SOC;
    }
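    /* q_mutex serializes the ring write and tail update between submitters */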
    mutex_lock(&cmd_q->q_mutex);

    /* Copy 32-byte command descriptor to hw queue. */
    memcpy(q_desc, desc, 32);
    cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;

    /* The data used by this command must be flushed to memory */
    wmb();

    /* Write the new tail address back to the queue register */
    tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
    iowrite32(tail, cmd_q->reg_control + 0x0004);

    /* Turn the queue back on using our cached control register */
    pt_start_queue(cmd_q);
    mutex_unlock(&cmd_q->q_mutex);

    return 0;
}

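/*
 * Build and submit a passthrough (plain copy) descriptor from the engine's
 * source/destination DMA addresses and length, enabling or disabling queue
 * interrupts according to the queue's int_en flag.
 */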
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
                 struct pt_passthru_engine *pt_engine)
{
    struct ptdma_desc desc;
    struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);

    cmd_q->cmd_error = 0;
    cmd_q->total_pt_ops++;
    memset(&desc, 0, sizeof(desc));
    desc.dw0 = CMD_DESC_DW0_VAL;
    desc.length = pt_engine->src_len;
    desc.src_lo = lower_32_bits(pt_engine->src_dma);
    desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
    desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
    desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);

    if (cmd_q->int_en)
        pt_core_enable_queue_interrupts(pt);
    else
        pt_core_disable_queue_interrupts(pt);

    return pt_core_execute_cmd(&desc, cmd_q);
}

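/*
 * Command completion handler, invoked from pt_check_status_trans() with the
 * device's tasklet data. On error it logs the hardware error code and flushes
 * whatever is still queued by rewriting the head pointer, then invokes the
 * submitter's callback with the stored return code.
 */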
static void pt_do_cmd_complete(unsigned long data)
{
    struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
    struct pt_cmd *cmd = tdata->cmd;
    struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
    u32 tail;

    if (cmd_q->cmd_error) {
        /*
         * Log the error and flush the queue by
         * moving the head pointer
         */
        tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        pt_log_error(cmd_q->pt, cmd_q->cmd_error);
        iowrite32(tail, cmd_q->reg_control + 0x0008);
    }

    cmd->pt_cmd_callback(cmd->data, cmd->ret);
}

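/*
 * Poll and acknowledge the queue interrupt status. The raw offsets used here
 * presumably map to the interrupt status (0x0010), queue status (0x0100) and
 * queue interrupt status (0x0104) registers; only the first error seen is
 * latched in cmd_q->cmd_error before the completion handler runs.
 */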
void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
    u32 status;

    status = ioread32(cmd_q->reg_control + 0x0010);
    if (status) {
        cmd_q->int_status = status;
        cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
        cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);

        /* On error, only save the first error value */
        if ((status & INT_ERROR) && !cmd_q->cmd_error)
            cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

        /* Acknowledge the completion */
        iowrite32(status, cmd_q->reg_control + 0x0010);
        pt_do_cmd_complete((ulong)&pt->tdata);
    }
}

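/*
 * Interrupt handler: mask queue interrupts while the status registers are
 * examined and completion is processed, then unmask them again.
 */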
static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
    struct pt_device *pt = data;
    struct pt_cmd_queue *cmd_q = &pt->cmd_q;

    pt_core_disable_queue_interrupts(pt);
    pt->total_interrupts++;
    pt_check_status_trans(pt, cmd_q);
    pt_core_enable_queue_interrupts(pt);
    return IRQ_HANDLED;
}

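/*
 * One-time device setup: create the DMA pool and the coherent command ring,
 * program the global configuration registers, point the queue head/tail at
 * the ring, hook up the interrupt handler and register with the dmaengine
 * framework. Failures unwind in reverse order through the labels below.
 */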
int pt_core_init(struct pt_device *pt)
{
    char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
    struct pt_cmd_queue *cmd_q = &pt->cmd_q;
    u32 dma_addr_lo, dma_addr_hi;
    struct device *dev = pt->dev;
    struct dma_pool *dma_pool;
    int ret;

    /* Allocate a dma pool for the queue */
    snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));

    dma_pool = dma_pool_create(dma_pool_name, dev,
                   PT_DMAPOOL_MAX_SIZE,
                   PT_DMAPOOL_ALIGN, 0);
    if (!dma_pool)
        return -ENOMEM;

    /* ptdma core initialisation */
    iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
    iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
    iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
    iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
    iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);

    cmd_q->pt = pt;
    cmd_q->dma_pool = dma_pool;
    mutex_init(&cmd_q->q_mutex);

    /* Page alignment satisfies our needs for N <= 128 */
    cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
    cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
                      &cmd_q->qbase_dma,
                      GFP_KERNEL);
    if (!cmd_q->qbase) {
        dev_err(dev, "unable to allocate command queue\n");
        ret = -ENOMEM;
        goto e_destroy_pool;
    }

    cmd_q->qidx = 0;

    /* Preset some register values */
    cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;

    /* Turn off the queues and disable interrupts until ready */
    pt_core_disable_queue_interrupts(pt);

    cmd_q->qcontrol = 0; /* Start with nothing */
    iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

    ioread32(cmd_q->reg_control + 0x0104);
    ioread32(cmd_q->reg_control + 0x0100);

    /* Clear the interrupt status */
    iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);

    /* Request an irq */
    ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
    if (ret) {
        dev_err(dev, "unable to allocate an IRQ\n");
        goto e_free_dma;
    }

    /* Update the device registers with queue information. */
    cmd_q->qcontrol &= ~CMD_Q_SIZE;
    cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);

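    /*
     * Head and tail both start at the ring base. Offsets 0x0004 and 0x0008
     * are presumably the queue tail-low and head-low registers, matching
     * their use in pt_core_execute_cmd() and pt_do_cmd_complete().
     */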
    cmd_q->qdma_tail = cmd_q->qbase_dma;
    dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
    iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
    iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);

    dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
    cmd_q->qcontrol |= (dma_addr_hi << 16);
    iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

    pt_core_enable_queue_interrupts(pt);

    /* Register the DMA engine support */
    ret = pt_dmaengine_register(pt);
    if (ret)
        goto e_free_irq;

    /* Set up debugfs entries */
    ptdma_debugfs_setup(pt);

    return 0;

e_free_irq:
    free_irq(pt->pt_irq, pt);

e_free_dma:
    dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

e_destroy_pool:
    dma_pool_destroy(pt->cmd_q.dma_pool);

    return ret;
}

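/*
 * Tear down in roughly the reverse order of pt_core_init(): unregister from
 * dmaengine, quiesce the queue and its interrupts, release the IRQ and the
 * ring memory, then fail any commands still listed with -ENODEV.
 */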
void pt_core_destroy(struct pt_device *pt)
{
    struct device *dev = pt->dev;
    struct pt_cmd_queue *cmd_q = &pt->cmd_q;
    struct pt_cmd *cmd;

    /* Unregister the DMA engine */
    pt_dmaengine_unregister(pt);

    /* Disable and clear interrupts */
    pt_core_disable_queue_interrupts(pt);

    /* Turn off the run bit */
    pt_stop_queue(cmd_q);

    /* Clear the interrupt status */
    iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
    ioread32(cmd_q->reg_control + 0x0104);
    ioread32(cmd_q->reg_control + 0x0100);

    free_irq(pt->pt_irq, pt);

    dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
              cmd_q->qbase_dma);

    /* Flush the cmd queue */
    while (!list_empty(&pt->cmd)) {
        /* Invoke the callback directly with an error code */
        cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
        list_del(&cmd->entry);
        cmd->pt_cmd_callback(cmd->data, -ENODEV);
    }
}