// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
    return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
    return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

    debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
    struct hsi_port *port = m->private;
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    void __iomem    *base = omap_ssi->sys;
    unsigned int ch;

    pm_runtime_get_sync(omap_port->pdev);
    if (omap_port->wake_irq > 0)
        seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
    seq_printf(m, "WAKE\t\t: 0x%08x\n",
                readl(base + SSI_WAKE_REG(port->num)));
    seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
            readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
    seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
            readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
    /* SST */
    base = omap_port->sst_base;
    seq_puts(m, "\nSST\n===\n");
    seq_printf(m, "ID SST\t\t: 0x%08x\n",
                readl(base + SSI_SST_ID_REG));
    seq_printf(m, "MODE\t\t: 0x%08x\n",
                readl(base + SSI_SST_MODE_REG));
    seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                readl(base + SSI_SST_FRAMESIZE_REG));
    seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
                readl(base + SSI_SST_DIVISOR_REG));
    seq_printf(m, "CHANNELS\t: 0x%08x\n",
                readl(base + SSI_SST_CHANNELS_REG));
    seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
                readl(base + SSI_SST_ARBMODE_REG));
    seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
                readl(base + SSI_SST_TXSTATE_REG));
    seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                readl(base + SSI_SST_BUFSTATE_REG));
    seq_printf(m, "BREAK\t\t: 0x%08x\n",
                readl(base + SSI_SST_BREAK_REG));
    for (ch = 0; ch < omap_port->channels; ch++) {
        seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                readl(base + SSI_SST_BUFFER_CH_REG(ch)));
    }
    /* SSR */
    base = omap_port->ssr_base;
    seq_puts(m, "\nSSR\n===\n");
    seq_printf(m, "ID SSR\t\t: 0x%08x\n",
                readl(base + SSI_SSR_ID_REG));
    seq_printf(m, "MODE\t\t: 0x%08x\n",
                readl(base + SSI_SSR_MODE_REG));
    seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                readl(base + SSI_SSR_FRAMESIZE_REG));
    seq_printf(m, "CHANNELS\t: 0x%08x\n",
                readl(base + SSI_SSR_CHANNELS_REG));
    seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
                readl(base + SSI_SSR_TIMEOUT_REG));
    seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
                readl(base + SSI_SSR_RXSTATE_REG));
    seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                readl(base + SSI_SSR_BUFSTATE_REG));
    seq_printf(m, "BREAK\t\t: 0x%08x\n",
                readl(base + SSI_SSR_BREAK_REG));
    seq_printf(m, "ERROR\t\t: 0x%08x\n",
                readl(base + SSI_SSR_ERROR_REG));
    seq_printf(m, "ERRORACK\t: 0x%08x\n",
                readl(base + SSI_SSR_ERRORACK_REG));
    for (ch = 0; ch < omap_port->channels; ch++) {
        seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
    }
    pm_runtime_put_autosuspend(omap_port->pdev);

    return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
    struct hsi_port *port = data;
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

    pm_runtime_get_sync(omap_port->pdev);
    *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
    pm_runtime_put_autosuspend(omap_port->pdev);

    return 0;
}

static int ssi_div_set(void *data, u64 val)
{
    struct hsi_port *port = data;
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

    if (val > 127)
        return -EINVAL;

    pm_runtime_get_sync(omap_port->pdev);
    writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
    omap_port->sst.divisor = val;
    pm_runtime_put_autosuspend(omap_port->pdev);

    return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
                     struct dentry *dir)
{
    struct hsi_port *port = to_hsi_port(omap_port->dev);

    dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
    if (!dir)
        return -ENOMEM;
    omap_port->dir = dir;
    debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
    dir = debugfs_create_dir("sst", dir);
    if (!dir)
        return -ENOMEM;
    debugfs_create_file_unsafe("divisor", 0644, dir, port,
                   &ssi_sst_div_fops);

    return 0;
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
    struct omap_ssi_port *omap_port;
    struct list_head *head, *tmp;
    struct hsi_msg *msg;

    omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

    list_for_each_safe(head, tmp, &omap_port->errqueue) {
        msg = list_entry(head, struct hsi_msg, link);
        msg->complete(msg);
        list_del(head);
    }
}

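/*
 * Pick the first free GDD logical channel for @msg and record the
 * transfer in gdd_trn[]; returns -EBUSY when all SSI_MAX_GDD_LCH
 * channels are already carrying a transfer.
 */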
static int ssi_claim_lch(struct hsi_msg *msg)
{
    struct hsi_port *port = hsi_get_port(msg->cl);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    int lch;

    for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
        if (!omap_ssi->gdd_trn[lch].msg) {
            omap_ssi->gdd_trn[lch].msg = msg;
            omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
            return lch;
        }

    return -EBUSY;
}

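/*
 * Program one GDD logical channel for the message: map the scatterlist,
 * set up source/destination addresses and hardware sync for the channel,
 * arm the channel interrupt and kick the transfer. A pm_runtime reference
 * is held for the whole duration of the transfer.
 */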
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
    struct hsi_port *port = hsi_get_port(msg->cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    void __iomem *gdd = omap_ssi->gdd;
    int err;
    u16 csdp;
    u16 ccr;
    u32 s_addr;
    u32 d_addr;
    u32 tmp;

    /* Hold clocks during the transfer */
    pm_runtime_get(omap_port->pdev);

    if (!pm_runtime_active(omap_port->pdev)) {
        dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
        pm_runtime_put_autosuspend(omap_port->pdev);
        return -EREMOTEIO;
    }

    if (msg->ttype == HSI_MSG_READ) {
        err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                            DMA_FROM_DEVICE);
        if (err < 0) {
            dev_dbg(&ssi->device, "DMA map SG failed!\n");
            pm_runtime_put_autosuspend(omap_port->pdev);
            return err;
        }
        csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
            SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
            SSI_DATA_TYPE_S32;
        ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
        ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
            SSI_CCR_ENABLE;
        s_addr = omap_port->ssr_dma +
                    SSI_SSR_BUFFER_CH_REG(msg->channel);
        d_addr = sg_dma_address(msg->sgt.sgl);
    } else {
        err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                            DMA_TO_DEVICE);
        if (err < 0) {
            dev_dbg(&ssi->device, "DMA map SG failed!\n");
            pm_runtime_put_autosuspend(omap_port->pdev);
            return err;
        }
        csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
            SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
            SSI_DATA_TYPE_S32;
        ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
        ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
            SSI_CCR_ENABLE;
        s_addr = sg_dma_address(msg->sgt.sgl);
        d_addr = omap_port->sst_dma +
                    SSI_SST_BUFFER_CH_REG(msg->channel);
    }
    dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
        lch, csdp, ccr, s_addr, d_addr);

    writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
    writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
    writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
    writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
    writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
                        gdd + SSI_GDD_CEN_REG(lch));

    spin_lock_bh(&omap_ssi->lock);
    tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    tmp |= SSI_GDD_LCH(lch);
    writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    spin_unlock_bh(&omap_ssi->lock);
    writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
    msg->status = HSI_STATUS_PROCEEDING;

    return 0;
}

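/*
 * PIO path, used when a message cannot go through the GDD (single-word
 * transfers or no free logical channel): arm the DATAACCEPT/DATAAVAILABLE
 * interrupt for the channel and let ssi_pio_complete() move the data one
 * 32-bit word at a time.
 */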
static int ssi_start_pio(struct hsi_msg *msg)
{
    struct hsi_port *port = hsi_get_port(msg->cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    u32 val;

    pm_runtime_get(omap_port->pdev);

    if (!pm_runtime_active(omap_port->pdev)) {
        dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
        pm_runtime_put_autosuspend(omap_port->pdev);
        return -EREMOTEIO;
    }

    if (msg->ttype == HSI_MSG_WRITE) {
        val = SSI_DATAACCEPT(msg->channel);
        /* Hold clocks for pio writes */
        pm_runtime_get(omap_port->pdev);
    } else {
        val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
    }
    dev_dbg(&port->device, "Single %s transfer\n",
                        msg->ttype ? "write" : "read");
    val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    pm_runtime_put_autosuspend(omap_port->pdev);
    msg->actual_len = 0;
    msg->status = HSI_STATUS_PROCEEDING;

    return 0;
}

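/*
 * Start the message at the head of @queue, if any: transfers longer than
 * one 32-bit word go through the GDD DMA engine when a logical channel is
 * free, everything else falls back to PIO.
 */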
static int ssi_start_transfer(struct list_head *queue)
{
    struct hsi_msg *msg;
    int lch = -1;

    if (list_empty(queue))
        return 0;
    msg = list_first_entry(queue, struct hsi_msg, link);
    if (msg->status != HSI_STATUS_QUEUED)
        return 0;
    if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
        lch = ssi_claim_lch(msg);
    if (lch >= 0)
        return ssi_start_dma(msg, lch);
    else
        return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
    struct hsi_port *port = hsi_get_port(msg->cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    int err = 0;
    u32 tmp;

    pm_runtime_get_sync(omap_port->pdev);
    if (msg->ttype == HSI_MSG_WRITE) {
        if (omap_port->sst.mode != SSI_MODE_FRAME) {
            err = -EINVAL;
            goto out;
        }
        writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
        msg->status = HSI_STATUS_COMPLETED;
        msg->complete(msg);
    } else {
        if (omap_port->ssr.mode != SSI_MODE_FRAME) {
            err = -EINVAL;
            goto out;
        }
        spin_lock_bh(&omap_port->lock);
        tmp = readl(omap_ssi->sys +
                    SSI_MPU_ENABLE_REG(port->num, 0));
        writel(tmp | SSI_BREAKDETECTED,
            omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        msg->status = HSI_STATUS_PROCEEDING;
        list_add_tail(&msg->link, &omap_port->brkqueue);
        spin_unlock_bh(&omap_port->lock);
    }
out:
    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);

    return err;
}

static int ssi_async(struct hsi_msg *msg)
{
    struct hsi_port *port = hsi_get_port(msg->cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct list_head *queue;
    int err = 0;

    BUG_ON(!msg);

    if (msg->sgt.nents > 1)
        return -ENOSYS; /* TODO: Add sg support */

    if (msg->break_frame)
        return ssi_async_break(msg);

    if (msg->ttype) {
        BUG_ON(msg->channel >= omap_port->sst.channels);
        queue = &omap_port->txqueue[msg->channel];
    } else {
        BUG_ON(msg->channel >= omap_port->ssr.channels);
        queue = &omap_port->rxqueue[msg->channel];
    }
    msg->status = HSI_STATUS_QUEUED;

    pm_runtime_get_sync(omap_port->pdev);
    spin_lock_bh(&omap_port->lock);
    list_add_tail(&msg->link, queue);
    err = ssi_start_transfer(queue);
    if (err < 0) {
        list_del(&msg->link);
        msg->status = HSI_STATUS_ERROR;
    }
    spin_unlock_bh(&omap_port->lock);
    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);
    dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
                msg->status, msg->ttype, msg->channel);

    return err;
}

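/*
 * Derive the SST divisor from the functional clock: the TX bit clock is
 * half the functional clock, and subtracting one before the division makes
 * an exact multiple of max_speed round down to the next lower divisor.
 */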
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    u32 tx_fckrate = (u32) omap_ssi->fck_rate;

    /* / 2 : SSI TX clock is always half of the SSI functional clock */
    tx_fckrate >>= 1;
    /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
    tx_fckrate--;
    dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
        tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
        omap_ssi->max_speed);

    return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
    struct list_head *node, *tmp;
    struct hsi_msg *msg;

    list_for_each_safe(node, tmp, queue) {
        msg = list_entry(node, struct hsi_msg, link);
        if ((cl) && (cl != msg->cl))
            continue;
        list_del(node);
        pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
            msg->channel, msg, msg->sgt.sgl->length,
                    msg->ttype, msg->context);
        if (msg->destructor)
            msg->destructor(msg);
        else
            hsi_free_msg(msg);
    }
}

static int ssi_setup(struct hsi_client *cl)
{
    struct hsi_port *port = to_hsi_port(cl->device.parent);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    void __iomem *sst = omap_port->sst_base;
    void __iomem *ssr = omap_port->ssr_base;
    u32 div;
    u32 val;
    int err = 0;

    pm_runtime_get_sync(omap_port->pdev);
    spin_lock_bh(&omap_port->lock);
    if (cl->tx_cfg.speed)
        omap_ssi->max_speed = cl->tx_cfg.speed;
    div = ssi_calculate_div(ssi);
    if (div > SSI_MAX_DIVISOR) {
        dev_err(&cl->device, "Invalid TX speed %d Kb/s (div %d)\n",
                        cl->tx_cfg.speed, div);
        err = -EINVAL;
        goto out;
    }
    /* Set TX/RX module to sleep to stop TX/RX during cfg update */
    writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
    writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
    /* Flush posted write */
    val = readl(ssr + SSI_SSR_MODE_REG);
    /* TX */
    writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
    writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
    writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
    writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
    writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
    /* RX */
    writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
    writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
    writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
    /* Cleanup the break queue if we leave FRAME mode */
    if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
        (cl->rx_cfg.mode != SSI_MODE_FRAME))
        ssi_flush_queue(&omap_port->brkqueue, cl);
    writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
    omap_port->channels = max(cl->rx_cfg.num_hw_channels,
                  cl->tx_cfg.num_hw_channels);
    /* Shadow registering for OFF mode */
    /* SST */
    omap_port->sst.divisor = div;
    omap_port->sst.frame_size = 31;
    omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
    omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
    omap_port->sst.mode = cl->tx_cfg.mode;
    /* SSR */
    omap_port->ssr.frame_size = 31;
    omap_port->ssr.timeout = 0;
    omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
    omap_port->ssr.mode = cl->rx_cfg.mode;
out:
    spin_unlock_bh(&omap_port->lock);
    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);

    return err;
}

static int ssi_flush(struct hsi_client *cl)
{
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct hsi_msg *msg;
    void __iomem *sst = omap_port->sst_base;
    void __iomem *ssr = omap_port->ssr_base;
    unsigned int i;
    u32 err;

    pm_runtime_get_sync(omap_port->pdev);
    spin_lock_bh(&omap_port->lock);

    /* stop all ssi communication */
    pinctrl_pm_select_idle_state(omap_port->pdev);
    udelay(1); /* wait for racing frames */

    /* Stop all DMA transfers */
    for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
        msg = omap_ssi->gdd_trn[i].msg;
        if (!msg || (port != hsi_get_port(msg->cl)))
            continue;
        writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
        if (msg->ttype == HSI_MSG_READ)
            pm_runtime_put_autosuspend(omap_port->pdev);
        omap_ssi->gdd_trn[i].msg = NULL;
    }
    /* Flush all SST buffers */
    writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
    writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
    /* Flush all SSR buffers */
    writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
    writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
    /* Flush all errors */
    err = readl(ssr + SSI_SSR_ERROR_REG);
    writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
    /* Flush break */
    writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
    /* Clear interrupts */
    writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    writel_relaxed(0xffffff00,
            omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
    /* Dequeue all pending requests */
    for (i = 0; i < omap_port->channels; i++) {
        /* Release write clocks */
        if (!list_empty(&omap_port->txqueue[i]))
            pm_runtime_put_autosuspend(omap_port->pdev);
        ssi_flush_queue(&omap_port->txqueue[i], NULL);
        ssi_flush_queue(&omap_port->rxqueue[i], NULL);
    }
    ssi_flush_queue(&omap_port->brkqueue, NULL);

    /* Resume SSI communication */
    pinctrl_pm_select_default_state(omap_port->pdev);

    spin_unlock_bh(&omap_port->lock);
    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);

    return 0;
}

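/*
 * Raising the wake line needs the clocks, and pm_runtime_get_sync() may
 * sleep, so ssi_start_tx() defers the SSI_SET_WAKE write to this work item
 * instead of doing it under its spinlock.
 */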
static void start_tx_work(struct work_struct *work)
{
    struct omap_ssi_port *omap_port =
                container_of(work, struct omap_ssi_port, work);
    struct hsi_port *port = to_hsi_port(omap_port->dev);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
    writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

    dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

    spin_lock_bh(&omap_port->wk_lock);
    if (omap_port->wk_refcount++) {
        spin_unlock_bh(&omap_port->wk_lock);
        return 0;
    }
    spin_unlock_bh(&omap_port->wk_lock);

    schedule_work(&omap_port->work);

    return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

    spin_lock_bh(&omap_port->wk_lock);
    BUG_ON(!omap_port->wk_refcount);
    if (--omap_port->wk_refcount) {
        spin_unlock_bh(&omap_port->wk_lock);
        return 0;
    }
    writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
    spin_unlock_bh(&omap_port->wk_lock);

    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

    return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
                            struct list_head *queue)
{
    struct hsi_msg *msg;
    int err = -1;

    pm_runtime_get(omap_port->pdev);
    spin_lock_bh(&omap_port->lock);
    while (err < 0) {
        err = ssi_start_transfer(queue);
        if (err < 0) {
            msg = list_first_entry(queue, struct hsi_msg, link);
            msg->status = HSI_STATUS_ERROR;
            msg->actual_len = 0;
            list_del(&msg->link);
            spin_unlock_bh(&omap_port->lock);
            msg->complete(msg);
            spin_lock_bh(&omap_port->lock);
        }
    }
    spin_unlock_bh(&omap_port->lock);
    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);
}

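/*
 * Drop every queued message belonging to @cl, releasing the clock
 * references held by in-flight writes, then clear the matching buffer
 * state bits and disarm/ack the now stale channel interrupts.
 */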
static void ssi_cleanup_queues(struct hsi_client *cl)
{
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct hsi_msg *msg;
    unsigned int i;
    u32 rxbufstate = 0;
    u32 txbufstate = 0;
    u32 status = SSI_ERROROCCURED;
    u32 tmp;

    ssi_flush_queue(&omap_port->brkqueue, cl);
    if (list_empty(&omap_port->brkqueue))
        status |= SSI_BREAKDETECTED;

    for (i = 0; i < omap_port->channels; i++) {
        if (list_empty(&omap_port->txqueue[i]))
            continue;
        msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
                                    link);
        if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
            txbufstate |= (1 << i);
            status |= SSI_DATAACCEPT(i);
            /* Release the clock references held by writes, GDD ones included */
            pm_runtime_mark_last_busy(omap_port->pdev);
            pm_runtime_put_autosuspend(omap_port->pdev);
        }
        ssi_flush_queue(&omap_port->txqueue[i], cl);
    }
    for (i = 0; i < omap_port->channels; i++) {
        if (list_empty(&omap_port->rxqueue[i]))
            continue;
        msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                                    link);
        if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
            rxbufstate |= (1 << i);
            status |= SSI_DATAAVAILABLE(i);
        }
        ssi_flush_queue(&omap_port->rxqueue[i], cl);
        /* Check if we keep the error detection interrupt armed */
        if (!list_empty(&omap_port->rxqueue[i]))
            status &= ~SSI_ERROROCCURED;
    }
    /* Cleanup write buffers */
    tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
    tmp &= ~txbufstate;
    writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
    /* Cleanup read buffers */
    tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
    tmp &= ~rxbufstate;
    writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
    /* Disarm and ack pending interrupts */
    tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    tmp &= ~status;
    writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    writel_relaxed(status, omap_ssi->sys +
        SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_msg *msg;
    unsigned int i;
    u32 val = 0;
    u32 tmp;

    for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
        msg = omap_ssi->gdd_trn[i].msg;
        if ((!msg) || (msg->cl != cl))
            continue;
        writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
        val |= (1 << i);
        /*
         * Clock references for write will be handled in
         * ssi_cleanup_queues
         */
        if (msg->ttype == HSI_MSG_READ) {
            pm_runtime_mark_last_busy(omap_port->pdev);
            pm_runtime_put_autosuspend(omap_port->pdev);
        }
        omap_ssi->gdd_trn[i].msg = NULL;
    }
    tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    tmp &= ~val;
    writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
    writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
    writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
    /* OCP barrier */
    mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

    return 0;
}

static int ssi_release(struct hsi_client *cl)
{
    struct hsi_port *port = hsi_get_port(cl);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

    pm_runtime_get_sync(omap_port->pdev);
    spin_lock_bh(&omap_port->lock);
    /* Stop all the pending DMA requests for that client */
    ssi_cleanup_gdd(ssi, cl);
    /* Now cleanup all the queues */
    ssi_cleanup_queues(cl);
    /* If it is the last client of the port, do extra checks and cleanup */
    if (port->claimed <= 1) {
        /*
         * Drop the clock reference for the incoming wake line
         * if it is still kept high by the other side.
         */
        if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
            pm_runtime_put_sync(omap_port->pdev);
        pm_runtime_get(omap_port->pdev);
        /* Stop any SSI TX/RX without a client */
        ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
        omap_port->sst.mode = SSI_MODE_SLEEP;
        omap_port->ssr.mode = SSI_MODE_SLEEP;
        pm_runtime_put(omap_port->pdev);
        WARN_ON(omap_port->wk_refcount != 0);
    }
    spin_unlock_bh(&omap_port->lock);
    pm_runtime_put_sync(omap_port->pdev);

    return 0;
}

static void ssi_error(struct hsi_port *port)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct hsi_msg *msg;
    unsigned int i;
    u32 err;
    u32 val;
    u32 tmp;

    /* ACK error */
    err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
    dev_err(&port->device, "SSI error: 0x%02x\n", err);
    if (!err) {
        dev_dbg(&port->device, "spurious SSI error ignored!\n");
        return;
    }
    spin_lock(&omap_ssi->lock);
    /* Cancel all GDD read transfers */
    for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
        msg = omap_ssi->gdd_trn[i].msg;
        if ((msg) && (msg->ttype == HSI_MSG_READ)) {
            writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
            val |= (1 << i);
            omap_ssi->gdd_trn[i].msg = NULL;
        }
    }
    tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    tmp &= ~val;
    writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
    spin_unlock(&omap_ssi->lock);
    /* Cancel all PIO read transfers */
    spin_lock(&omap_port->lock);
    tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
    writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    /* ACK error */
    writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
    writel_relaxed(SSI_ERROROCCURED,
            omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    /* Signal the error to all current pending read requests */
    for (i = 0; i < omap_port->channels; i++) {
        if (list_empty(&omap_port->rxqueue[i]))
            continue;
        msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                                    link);
        list_del(&msg->link);
        msg->status = HSI_STATUS_ERROR;
        spin_unlock(&omap_port->lock);
        msg->complete(msg);
        /* Now restart queued reads if any */
        ssi_transfer(omap_port, &omap_port->rxqueue[i]);
        spin_lock(&omap_port->lock);
    }
    spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct hsi_msg *msg;
    struct hsi_msg *tmp;
    u32 val;

    dev_dbg(&port->device, "HWBREAK received\n");

    spin_lock(&omap_port->lock);
    val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    val &= ~SSI_BREAKDETECTED;
    writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
    writel(SSI_BREAKDETECTED,
            omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    spin_unlock(&omap_port->lock);

    list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
        msg->status = HSI_STATUS_COMPLETED;
        spin_lock(&omap_port->lock);
        list_del(&msg->link);
        spin_unlock(&omap_port->lock);
        msg->complete(msg);
    }
}

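/*
 * Interrupt-driven data path: move one 32-bit word between the message
 * buffer and the per-channel buffer register, and complete the message once
 * actual_len reaches the scatterlist length (for writes, only after the
 * last frame has really left the wire).
 */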
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_msg *msg;
    u32 *buf;
    u32 reg;
    u32 val;

    spin_lock_bh(&omap_port->lock);
    msg = list_first_entry(queue, struct hsi_msg, link);
    if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
        msg->actual_len = 0;
        msg->status = HSI_STATUS_PENDING;
    }
    if (msg->ttype == HSI_MSG_WRITE)
        val = SSI_DATAACCEPT(msg->channel);
    else
        val = SSI_DATAAVAILABLE(msg->channel);
    if (msg->status == HSI_STATUS_PROCEEDING) {
        buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
        if (msg->ttype == HSI_MSG_WRITE)
            writel(*buf, omap_port->sst_base +
                    SSI_SST_BUFFER_CH_REG(msg->channel));
        else
            *buf = readl(omap_port->ssr_base +
                    SSI_SSR_BUFFER_CH_REG(msg->channel));
        dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
                            msg->ttype, *buf);
        msg->actual_len += sizeof(*buf);
        if (msg->actual_len >= msg->sgt.sgl->length)
            msg->status = HSI_STATUS_COMPLETED;
        /*
         * Wait for the last written frame to be really sent before
         * we call the complete callback
         */
        if ((msg->status == HSI_STATUS_PROCEEDING) ||
                ((msg->status == HSI_STATUS_COMPLETED) &&
                    (msg->ttype == HSI_MSG_WRITE))) {
            writel(val, omap_ssi->sys +
                    SSI_MPU_STATUS_REG(port->num, 0));
            spin_unlock_bh(&omap_port->lock);

            return;
        }
    }
    /* Transfer completed at this point */
    reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    if (msg->ttype == HSI_MSG_WRITE) {
        /* Release clocks for write transfer */
        pm_runtime_mark_last_busy(omap_port->pdev);
        pm_runtime_put_autosuspend(omap_port->pdev);
    }
    reg &= ~val;
    writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
    writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
    list_del(&msg->link);
    spin_unlock_bh(&omap_port->lock);
    msg->complete(msg);
    ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
    struct hsi_port *port = (struct hsi_port *)ssi_port;
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    void __iomem *sys = omap_ssi->sys;
    unsigned int ch;
    u32 status_reg;

    pm_runtime_get_sync(omap_port->pdev);

    do {
        status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
        status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

        for (ch = 0; ch < omap_port->channels; ch++) {
            if (status_reg & SSI_DATAACCEPT(ch))
                ssi_pio_complete(port, &omap_port->txqueue[ch]);
            if (status_reg & SSI_DATAAVAILABLE(ch))
                ssi_pio_complete(port, &omap_port->rxqueue[ch]);
        }
        if (status_reg & SSI_BREAKDETECTED)
            ssi_break_complete(port);
        if (status_reg & SSI_ERROROCCURED)
            ssi_error(port);

        status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
        status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

        /* TODO: sleep if we retry? */
    } while (status_reg);

    pm_runtime_mark_last_busy(omap_port->pdev);
    pm_runtime_put_autosuspend(omap_port->pdev);

    return IRQ_HANDLED;
}

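/*
 * Threaded handler for the CAWAKE gpio interrupt: take a clock reference
 * while the remote end keeps the wake line high, and forward
 * HSI_EVENT_START_RX/HSI_EVENT_STOP_RX to the port's clients.
 */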
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
    struct hsi_port *port = (struct hsi_port *)ssi_port;
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    if (ssi_wakein(port)) {
        /*
         * We can have a quick High-Low-High transition in the line.
         * In such a case if we have long interrupt latencies,
         * we can miss the low event or see the high event twice.
         * This workaround will avoid breaking the clock reference
         * count when such a situation occurs.
         */
        if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
            pm_runtime_get_sync(omap_port->pdev);
        dev_dbg(&ssi->device, "Wake in high\n");
        if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
            writel(SSI_WAKE(0),
                omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
        }
        hsi_event(port, HSI_EVENT_START_RX);
    } else {
        dev_dbg(&ssi->device, "Wake in low\n");
        if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
            writel(SSI_WAKE(0),
                omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
        }
        hsi_event(port, HSI_EVENT_STOP_RX);
        if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
            pm_runtime_mark_last_busy(omap_port->pdev);
            pm_runtime_put_autosuspend(omap_port->pdev);
        }
    }

    return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    int err;

    err = platform_get_irq(pd, 0);
    if (err < 0)
        return err;
    omap_port->irq = err;
    err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
                ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
    if (err < 0)
        dev_err(&port->device, "Request IRQ %d failed (%d)\n",
                            omap_port->irq, err);
    return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    int cawake_irq;
    int err;

    if (!omap_port->wake_gpio) {
        omap_port->wake_irq = -1;
        return 0;
    }

    cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
    omap_port->wake_irq = cawake_irq;

    err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
        ssi_wake_thread,
        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
        "SSI cawake", port);
    if (err < 0)
        dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
                        cawake_irq, err);
    err = enable_irq_wake(cawake_irq);
    if (err < 0)
        dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
            cawake_irq, err);

    return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
    unsigned int ch;

    for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
        INIT_LIST_HEAD(&omap_port->txqueue[ch]);
        INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
    }
    INIT_LIST_HEAD(&omap_port->brkqueue);
}

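/*
 * Map the named "tx" or "rx" MEM resource of the port and optionally hand
 * back its physical start address, which ssi_start_dma() needs to program
 * the GDD source/destination registers.
 */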
static int ssi_port_get_iomem(struct platform_device *pd,
        const char *name, void __iomem **pbase, dma_addr_t *phy)
{
    struct hsi_port *port = platform_get_drvdata(pd);
    struct resource *mem;
    struct resource *ioarea;
    void __iomem *base;

    mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
    if (!mem) {
        dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
        return -ENXIO;
    }
    ioarea = devm_request_mem_region(&port->device, mem->start,
                    resource_size(mem), dev_name(&pd->dev));
    if (!ioarea) {
        dev_err(&pd->dev, "%s IO memory region request failed\n",
                                mem->name);
        return -ENXIO;
    }
    base = devm_ioremap(&port->device, mem->start, resource_size(mem));
    if (!base) {
        dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
        return -ENXIO;
    }
    *pbase = base;

    if (phy)
        *phy = mem->start;

    return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
    struct device_node *np = pd->dev.of_node;
    struct hsi_port *port;
    struct omap_ssi_port *omap_port;
    struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    struct gpio_desc *cawake_gpio = NULL;
    u32 port_id;
    int err;

    dev_dbg(&pd->dev, "init ssi port...\n");

    if (!ssi->port || !omap_ssi->port) {
        dev_err(&pd->dev, "ssi controller not initialized!\n");
        err = -ENODEV;
        goto error;
    }

    /* get id of first uninitialized port in controller */
    for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
        port_id++)
        ;

    if (port_id >= ssi->num_ports) {
        dev_err(&pd->dev, "port id out of range!\n");
        err = -ENODEV;
        goto error;
    }

    port = ssi->port[port_id];

    if (!np) {
        dev_err(&pd->dev, "missing device tree data\n");
        err = -EINVAL;
        goto error;
    }

    cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
    if (IS_ERR(cawake_gpio)) {
        err = PTR_ERR(cawake_gpio);
        dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
        goto error;
    }

    omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
    if (!omap_port) {
        err = -ENOMEM;
        goto error;
    }
    omap_port->wake_gpio = cawake_gpio;
    omap_port->pdev = &pd->dev;
    omap_port->port_id = port_id;

    INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
    INIT_WORK(&omap_port->work, start_tx_work);

    /* initialize HSI port */
    port->async = ssi_async;
    port->setup = ssi_setup;
    port->flush = ssi_flush;
    port->start_tx  = ssi_start_tx;
    port->stop_tx   = ssi_stop_tx;
    port->release   = ssi_release;
    hsi_port_set_drvdata(port, omap_port);
    omap_ssi->port[port_id] = omap_port;

    platform_set_drvdata(pd, port);

    err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
        &omap_port->sst_dma);
    if (err < 0)
        goto error;
    err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
        &omap_port->ssr_dma);
    if (err < 0)
        goto error;

    err = ssi_port_irq(port, pd);
    if (err < 0)
        goto error;
    err = ssi_wake_irq(port, pd);
    if (err < 0)
        goto error;

    ssi_queues_init(omap_port);
    spin_lock_init(&omap_port->lock);
    spin_lock_init(&omap_port->wk_lock);
    omap_port->dev = &port->device;

    pm_runtime_use_autosuspend(omap_port->pdev);
    pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
    pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
    err = ssi_debug_add_port(omap_port, omap_ssi->dir);
    if (err < 0) {
        pm_runtime_disable(omap_port->pdev);
        goto error;
    }
#endif

    hsi_add_clients_from_dt(port, np);

    dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

    return 0;

error:
    return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
    struct hsi_port *port = platform_get_drvdata(pd);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
    ssi_debug_remove_port(port);
#endif

    cancel_delayed_work_sync(&omap_port->errqueue_work);

    hsi_port_unregister_clients(port);

    port->async = hsi_dummy_msg;
    port->setup = hsi_dummy_cl;
    port->flush = hsi_dummy_cl;
    port->start_tx  = hsi_dummy_cl;
    port->stop_tx   = hsi_dummy_cl;
    port->release   = hsi_dummy_cl;

    omap_ssi->port[omap_port->port_id] = NULL;
    platform_set_drvdata(pd, NULL);

    pm_runtime_dont_use_autosuspend(&pd->dev);
    pm_runtime_disable(&pd->dev);

    return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
    writel_relaxed(omap_port->sst.divisor,
                omap_port->sst_base + SSI_SST_DIVISOR_REG);

    return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
                   struct omap_ssi_port *omap_port)
{
    /* update divisor */
    u32 div = ssi_calculate_div(ssi);
    omap_port->sst.divisor = div;
    ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
    struct hsi_port *port = to_hsi_port(omap_port->dev);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    omap_port->sys_mpu_enable = readl(omap_ssi->sys +
                    SSI_MPU_ENABLE_REG(port->num, 0));

    return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
    struct hsi_port *port = to_hsi_port(omap_port->dev);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
    void __iomem    *base;

    writel_relaxed(omap_port->sys_mpu_enable,
            omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

    /* SST context */
    base = omap_port->sst_base;
    writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
    writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
    writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

    /* SSR context */
    base = omap_port->ssr_base;
    writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
    writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
    writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

    return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
    u32 mode;

    writel_relaxed(omap_port->sst.mode,
                omap_port->sst_base + SSI_SST_MODE_REG);
    writel_relaxed(omap_port->ssr.mode,
                omap_port->ssr_base + SSI_SSR_MODE_REG);
    /* OCP barrier */
    mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

    return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
    struct hsi_port *port = dev_get_drvdata(dev);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    dev_dbg(dev, "port runtime suspend!\n");

    ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
    if (omap_ssi->get_loss)
        omap_port->loss_count =
                omap_ssi->get_loss(ssi->device.parent);
    ssi_save_port_ctx(omap_port);

    return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
    struct hsi_port *port = dev_get_drvdata(dev);
    struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
    struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
    struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

    dev_dbg(dev, "port runtime resume!\n");

    if ((omap_ssi->get_loss) && (omap_port->loss_count ==
                omap_ssi->get_loss(ssi->device.parent)))
        goto mode; /* We always need to restore the mode & TX divisor */

    ssi_restore_port_ctx(omap_port);

mode:
    ssi_restore_divisor(omap_port);
    ssi_restore_port_mode(omap_port);

    return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
    SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
        omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
    { .compatible = "ti,omap3-ssi-port", },
    {},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
    .probe = ssi_port_probe,
    .remove = ssi_port_remove,
    .driver = {
        .name   = "omap_ssi_port",
        .of_match_table = omap_ssi_port_of_match,
        .pm = DEV_PM_OPS,
    },
};