0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * AMD am53c974 driver.
0004  * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
0005  */
0006 
0007 #include <linux/kernel.h>
0008 #include <linux/module.h>
0009 #include <linux/init.h>
0010 #include <linux/delay.h>
0011 #include <linux/pci.h>
0012 #include <linux/interrupt.h>
0013 
0014 #include <scsi/scsi_host.h>
0015 
0016 #include "esp_scsi.h"
0017 
#define DRV_MODULE_NAME "am53c974"
#define DRV_MODULE_VERSION "1.00"

/* Module parameters; registered at the bottom of the file. */
static bool am53c974_debug;		/* verbose DMA debug logging when set */
static bool am53c974_fenab = true;	/* enable ESP_CONFIG2_FENAB (24-bit transfers) */

/* Debug logging helper: prints only when am53c974_debug is set.
 * Expects a local variable 'esp' to be in scope at the call site. */
#define esp_dma_log(f, a...)						\
	do {								\
		if (am53c974_debug)					\
			shost_printk(KERN_DEBUG, esp->host, f, ##a);	\
	} while (0)

/*
 * PCI DMA engine register offsets.  Registers are DWORD-spaced, so the
 * accessors below multiply these offsets by 4.
 */
#define ESP_DMA_CMD 0x10	/* DMA command register */
#define ESP_DMA_STC 0x11	/* written with the transfer count */
#define ESP_DMA_SPA 0x12	/* written with the buffer bus address */
#define ESP_DMA_WBC 0x13
#define ESP_DMA_WAC 0x14
#define ESP_DMA_STATUS 0x15	/* DMA status register */
#define ESP_DMA_SMDLA 0x16
#define ESP_DMA_WMAC 0x17

/* ESP_DMA_CMD values (low two bits select the operation) */
#define ESP_DMA_CMD_IDLE 0x00
#define ESP_DMA_CMD_BLAST 0x01	/* flush FIFO residue to memory */
#define ESP_DMA_CMD_ABORT 0x02
#define ESP_DMA_CMD_START 0x03
#define ESP_DMA_CMD_MASK  0x03	/* mask for the operation bits above */
#define ESP_DMA_CMD_DIAG 0x04
#define ESP_DMA_CMD_MDL 0x10
#define ESP_DMA_CMD_INTE_P 0x20
#define ESP_DMA_CMD_INTE_D 0x40
#define ESP_DMA_CMD_DIR 0x80	/* transfer direction (inverted; see send_dma_cmd) */

/* ESP_DMA_STATUS bits */
#define ESP_DMA_STAT_PWDN 0x01
#define ESP_DMA_STAT_ERROR 0x02
#define ESP_DMA_STAT_ABORT 0x04
#define ESP_DMA_STAT_DONE 0x08
#define ESP_DMA_STAT_SCSIINT 0x10
#define ESP_DMA_STAT_BCMPLT 0x20	/* BLAST complete */

/* EEPROM is accessed with 16-bit values */
#define DC390_EEPROM_READ 0x80	/* read-command opcode / first word address */
#define DC390_EEPROM_LEN 0x40	/* EEPROM size in 16-bit words */

/*
 * DC390 EEPROM
 *
 * 8 * 4 bytes of per-device options
 * followed by HBA specific options
 */

/* Per-device options */
#define DC390_EE_MODE1 0x00
#define DC390_EE_SPEED 0x01

/* HBA-specific options (byte offsets into the EEPROM image) */
#define DC390_EE_ADAPT_SCSI_ID 0x40
#define DC390_EE_MODE2 0x41
#define DC390_EE_DELAY 0x42
#define DC390_EE_TAG_CMD_NUM 0x43

/* DC390_EE_MODE1 bits */
#define DC390_EE_MODE1_PARITY_CHK   0x01
#define DC390_EE_MODE1_SYNC_NEGO    0x02
#define DC390_EE_MODE1_EN_DISC      0x04
#define DC390_EE_MODE1_SEND_START   0x08
#define DC390_EE_MODE1_TCQ          0x10

/* DC390_EE_MODE2 bits */
#define DC390_EE_MODE2_MORE_2DRV    0x01
#define DC390_EE_MODE2_GREATER_1G   0x02
#define DC390_EE_MODE2_RST_SCSI_BUS 0x04
#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08
#define DC390_EE_MODE2_NO_SEEK      0x10
#define DC390_EE_MODE2_LUN_CHECK    0x20
0090 
/* Per-HBA private state, stored as the PCI device's drvdata. */
struct pci_esp_priv {
	struct esp *esp;	/* back-pointer to the core ESP state */
	u8 dma_status;		/* DMA status latched by pci_esp_irq_pending() */
};
0095 
0096 static void pci_esp_dma_drain(struct esp *esp);
0097 
0098 static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
0099 {
0100     return dev_get_drvdata(esp->dev);
0101 }
0102 
0103 static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
0104 {
0105     iowrite8(val, esp->regs + (reg * 4UL));
0106 }
0107 
0108 static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
0109 {
0110     return ioread8(esp->regs + (reg * 4UL));
0111 }
0112 
0113 static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
0114 {
0115     return iowrite32(val, esp->regs + (reg * 4UL));
0116 }
0117 
0118 static int pci_esp_irq_pending(struct esp *esp)
0119 {
0120     struct pci_esp_priv *pep = pci_esp_get_priv(esp);
0121 
0122     pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
0123     esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);
0124 
0125     if (pep->dma_status & (ESP_DMA_STAT_ERROR |
0126                    ESP_DMA_STAT_ABORT |
0127                    ESP_DMA_STAT_DONE |
0128                    ESP_DMA_STAT_SCSIINT))
0129         return 1;
0130 
0131     return 0;
0132 }
0133 
/* DMA engine reset hook; this hardware needs no explicit reset sequence. */
static void pci_esp_reset_dma(struct esp *esp)
{
	/* Intentionally empty. */
}
0138 
/*
 * Drain bytes left in the chip FIFO at the end of a DMA transfer.
 *
 * Polls until the FIFO holds at most one byte, then issues a DMA BLAST
 * command to push the remainder out to memory.  Both polls are bounded
 * so a wedged chip cannot hang the CPU.  A single leftover byte is
 * flagged as a residual on the active command.
 */
static void pci_esp_dma_drain(struct esp *esp)
{
	u8 resid;	/* FIFO byte count; assigned on the first poll below */
	int lim = 1000;	/* bounded-poll limit */


	if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
	    (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
		/* Data-In or Data-Out, nothing to be done */
		return;

	/* Wait for the FIFO to drain down to at most one byte */
	while (--lim > 0) {
		resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
		if (resid <= 1)
			break;
		cpu_relax();
	}

	/*
	 * When there is a residual BCMPLT will never be set
	 * (obviously). But we still have to issue the BLAST
	 * command, otherwise the data will not being transferred.
	 * But we'll never know when the BLAST operation is
	 * finished. So check for some time and give up eventually.
	 */
	lim = 1000;
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
	while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
		if (--lim == 0)
			break;
		cpu_relax();
	}
	/* Return the DMA engine to idle once the BLAST has (likely) finished */
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
	esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);
	/* BLAST residual handling is currently untested */
	if (WARN_ON_ONCE(resid == 1)) {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->flags |= ESP_CMD_FLAG_RESIDUAL;
	}
}
0180 
0181 static void pci_esp_dma_invalidate(struct esp *esp)
0182 {
0183     struct pci_esp_priv *pep = pci_esp_get_priv(esp);
0184 
0185     esp_dma_log("invalidate DMA\n");
0186 
0187     pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
0188     pep->dma_status = 0;
0189 }
0190 
/*
 * Report whether the DMA status latched by pci_esp_irq_pending()
 * indicates a failure, nudging the engine out of the failed state.
 *
 * Returns 1 on error or abort, 0 otherwise.
 */
static int pci_esp_dma_error(struct esp *esp)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);

	if (pep->dma_status & ESP_DMA_STAT_ERROR) {
		u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);

		/* Only abort if a transfer was actually started */
		if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
			pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);

		return 1;
	}
	if (pep->dma_status & ESP_DMA_STAT_ABORT) {
		pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
		/*
		 * NOTE(review): this re-reads the DMA *command* register into
		 * dma_status rather than ESP_DMA_STATUS — possibly intentional
		 * (the read may ack the abort), but worth confirming against
		 * the am53c974 datasheet.
		 */
		pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
		return 1;
	}
	return 0;
}
0210 
/*
 * Program the chip for a DMA transfer and then issue the ESP command.
 *
 * @addr:      bus address of the data buffer
 * @esp_count: transfer length programmed into the ESP core and DMA engine
 * @dma_count: DMA-side length; only used for logging here
 * @write:     non-zero for a device-to-memory transfer
 * @cmd:       ESP command to issue; must have ESP_CMD_DMA set
 *
 * Register programming order matters: the engine is idled, the counters
 * and address are loaded, the SCSI command is issued, and only then is
 * the DMA START command written.
 */
static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);
	u32 val = 0;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	pep->dma_status = 0;

	/* Set DMA engine to IDLE */
	if (write)
		/* DMA write direction logic is inverted */
		val |= ESP_DMA_CMD_DIR;
	pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);

	/* Load the ESP core transfer counter, byte by byte */
	pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->config2 & ESP_CONFIG2_FENAB)
		/* third counter byte only exists in extended (FENAB) mode */
		pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	/* Program the DMA engine's count and buffer address */
	pci_esp_write32(esp, esp_count, ESP_DMA_STC);
	pci_esp_write32(esp, addr, ESP_DMA_SPA);

	esp_dma_log("start dma addr[%x] count[%d:%d]\n",
		    addr, esp_count, dma_count);

	scsi_esp_cmd(esp, cmd);
	/* Send DMA Start command */
	pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
}
0242 
0243 static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
0244 {
0245     int dma_limit = 16;
0246     u32 base, end;
0247 
0248     /*
0249      * If CONFIG2_FENAB is set we can
0250      * handle up to 24 bit addresses
0251      */
0252     if (esp->config2 & ESP_CONFIG2_FENAB)
0253         dma_limit = 24;
0254 
0255     if (dma_len > (1U << dma_limit))
0256         dma_len = (1U << dma_limit);
0257 
0258     /*
0259      * Prevent crossing a 24-bit address boundary.
0260      */
0261     base = dma_addr & ((1U << 24) - 1U);
0262     end = base + dma_len;
0263     if (end > (1U << 24))
0264         end = (1U <<24);
0265     dma_len = end - base;
0266 
0267     return dma_len;
0268 }
0269 
/* Hooks through which the shared ESP core drives this PCI implementation. */
static const struct esp_driver_ops pci_esp_ops = {
	.esp_write8 =	pci_esp_write8,
	.esp_read8  =	pci_esp_read8,
	.irq_pending	=	pci_esp_irq_pending,
	.reset_dma  =	pci_esp_reset_dma,
	.dma_drain  =	pci_esp_dma_drain,
	.dma_invalidate =	pci_esp_dma_invalidate,
	.send_dma_cmd	=	pci_esp_send_dma_cmd,
	.dma_error  =	pci_esp_dma_error,
	.dma_length_limit = pci_esp_dma_length_limit,
};
0281 
/*
 * Read DC-390 eeprom
 *
 * The EEPROM is bit-banged through PCI config-space register 0x80.
 */

/*
 * Clock a 9-bit read command out to the EEPROM, MSB first, one bit per
 * loop iteration.  The 160us delays between config writes provide the
 * EEPROM's required setup/hold timing.
 */
static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
	u8 carry_flag = 1, j = 0x80, bval;
	int i;

	for (i = 0; i < 9; i++) {
		/* carry_flag holds the data bit selected in the previous pass;
		 * the leading 1 (start bit) is seeded by the initializer */
		if (carry_flag) {
			pci_write_config_byte(pdev, 0x80, 0x40);
			bval = 0xc0;
		} else
			bval = 0x80;

		udelay(160);
		pci_write_config_byte(pdev, 0x80, bval);
		udelay(160);
		pci_write_config_byte(pdev, 0x80, 0);
		udelay(160);

		/* Select the next command bit, MSB first */
		carry_flag = (cmd & j) ? 1 : 0;
		j >>= 1;
	}
}
0307 
/*
 * Clock one 16-bit word of EEPROM data in, MSB first.
 *
 * NOTE(review): the data bit is sensed by reading PCI config offset
 * 0x00 and comparing against 0x22 — presumably the chip multiplexes the
 * EEPROM data line onto that read; confirm against the datasheet.
 */
static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
{
	int i;
	u16 wval = 0;
	u8 bval;

	for (i = 0; i < 16; i++) {
		wval <<= 1;

		/* Toggle the clock line via config register 0x80 */
		pci_write_config_byte(pdev, 0x80, 0x80);
		udelay(160);
		pci_write_config_byte(pdev, 0x80, 0x40);
		udelay(160);
		pci_read_config_byte(pdev, 0x00, &bval);

		if (bval == 0x22)
			wval |= 1;
	}

	return wval;
}
0329 
/*
 * Read the whole EEPROM (DC390_EEPROM_LEN 16-bit words) into @ptr.
 *
 * Each word is fetched with its own read command; the command byte is
 * incremented so the word address advances each iteration.
 */
static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
{
	u8 cmd = DC390_EEPROM_READ, i;

	for (i = 0; i < DC390_EEPROM_LEN; i++) {
		pci_write_config_byte(pdev, 0xc0, 0);
		udelay(160);

		dc390_eeprom_prepare_read(pdev, cmd++);
		*ptr++ = dc390_eeprom_get_data(pdev);

		/* De-assert the interface between words */
		pci_write_config_byte(pdev, 0x80, 0);
		pci_write_config_byte(pdev, 0x80, 0);
		udelay(160);
	}
}
0346 
/*
 * Look for a Tekram DC-390 EEPROM and apply its settings.
 *
 * All DC390_EEPROM_LEN words must sum to 0x1234 for the contents to be
 * trusted; on success the adapter SCSI ID, tagged-command depth and
 * active-negation configuration are taken from the EEPROM.
 */
static void dc390_check_eeprom(struct esp *esp)
{
	struct pci_dev *pdev = to_pci_dev(esp->dev);
	u8 EEbuf[128];
	/* NOTE(review): filling via u16* and indexing via u8[] assumes a
	 * little-endian word layout — fine on the platforms this chip ships
	 * in, but worth confirming if ever built big-endian. */
	u16 *ptr = (u16 *)EEbuf, wval = 0;
	int i;

	dc390_read_eeprom(pdev, ptr);

	/* Checksum: the words must sum (mod 2^16) to the magic 0x1234 */
	for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
		wval += *ptr;

	/* no Tekram EEprom found */
	if (wval != 0x1234) {
		dev_printk(KERN_INFO, &pdev->dev,
		       "No valid Tekram EEprom found\n");
		return;
	}
	esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
	esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
	if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
		esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
}
0370 
/*
 * PCI probe: bring up one am53c974 HBA.
 *
 * Enables the device, sets the 32-bit DMA mask, allocates the SCSI host
 * and private data, maps the registers, allocates the command block and
 * IRQ, reads EEPROM defaults and registers with the ESP core.  Failures
 * unwind through the goto-cleanup chain in reverse acquisition order.
 *
 * Returns 0 on success or a negative errno.
 */
static int pci_esp_probe_one(struct pci_dev *pdev,
		      const struct pci_device_id *id)
{
	struct scsi_host_template *hostt = &scsi_esp_template;
	int err = -ENODEV;
	struct Scsi_Host *shost;
	struct esp *esp;
	struct pci_esp_priv *pep;

	if (pci_enable_device(pdev)) {
		dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
		return -ENODEV;
	}

	/* The DMA engine only generates 32-bit addresses */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_printk(KERN_INFO, &pdev->dev,
		       "failed to set 32bit DMA mask\n");
		goto fail_disable_device;
	}

	/* The struct esp lives in the Scsi_Host's private area */
	shost = scsi_host_alloc(hostt, sizeof(struct esp));
	if (!shost) {
		dev_printk(KERN_INFO, &pdev->dev,
		       "failed to allocate scsi host\n");
		err = -ENOMEM;
		goto fail_disable_device;
	}

	pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
	if (!pep) {
		dev_printk(KERN_INFO, &pdev->dev,
		       "failed to allocate esp_priv\n");
		err = -ENOMEM;
		goto fail_host_alloc;
	}

	esp = shost_priv(shost);
	esp->host = shost;
	esp->dev = &pdev->dev;
	esp->ops = &pci_esp_ops;
	/*
	 * The am53c974 HBA has a design flaw of generating
	 * spurious DMA completion interrupts when using
	 * DMA for command submission.
	 */
	esp->flags |= ESP_FLAG_USE_FIFO;
	/*
	 * Enable CONFIG2_FENAB to allow for large DMA transfers
	 */
	if (am53c974_fenab)
		esp->config2 |= ESP_CONFIG2_FENAB;

	pep->esp = esp;

	if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
		dev_printk(KERN_ERR, &pdev->dev,
		       "pci memory selection failed\n");
		/* NOTE(review): err is still the initial -ENODEV here; a more
		 * specific code might be worth propagating. */
		goto fail_priv_alloc;
	}

	esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!esp->regs) {
		dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
		err = -EINVAL;
		goto fail_release_regions;
	}
	/* ESP core and DMA engine share one register window on this chip */
	esp->dma_regs = esp->regs;

	pci_set_master(pdev);

	esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
			&esp->command_block_dma, GFP_KERNEL);
	if (!esp->command_block) {
		dev_printk(KERN_ERR, &pdev->dev,
		       "failed to allocate command block\n");
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	pci_set_drvdata(pdev, pep);

	err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
		      DRV_MODULE_NAME, esp);
	if (err < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
		goto fail_unmap_command_block;
	}

	/* Default host ID; overridden by a valid Tekram EEPROM if present */
	esp->scsi_id = 7;
	dc390_check_eeprom(esp);

	shost->this_id = esp->scsi_id;
	shost->max_id = 8;
	shost->irq = pdev->irq;
	shost->io_port = pci_resource_start(pdev, 0);
	shost->n_io_port = pci_resource_len(pdev, 0);
	shost->unique_id = shost->io_port;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	/* Assume 40MHz clock */
	esp->cfreq = 40000000;

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(pdev->irq, esp);
fail_unmap_command_block:
	pci_set_drvdata(pdev, NULL);
	dma_free_coherent(&pdev->dev, 16, esp->command_block,
		      esp->command_block_dma);
fail_unmap_regs:
	pci_iounmap(pdev, esp->regs);
fail_release_regions:
	pci_release_regions(pdev);
fail_priv_alloc:
	kfree(pep);
fail_host_alloc:
	scsi_host_put(shost);
fail_disable_device:
	pci_disable_device(pdev);

	return err;
}
0497 
/*
 * PCI remove: tear everything down in reverse order of probe.
 */
static void pci_esp_remove_one(struct pci_dev *pdev)
{
	struct pci_esp_priv *pep = pci_get_drvdata(pdev);
	struct esp *esp = pep->esp;

	scsi_esp_unregister(esp);
	free_irq(pdev->irq, esp);
	pci_set_drvdata(pdev, NULL);
	dma_free_coherent(&pdev->dev, 16, esp->command_block,
		      esp->command_block_dma);
	pci_iounmap(pdev, esp->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(pep);

	/* esp lives inside the Scsi_Host, so drop the host reference last */
	scsi_host_put(esp->host);
}
0515 
0516 static struct pci_device_id am53c974_pci_tbl[] = {
0517     { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
0518         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
0519     { }
0520 };
0521 MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);
0522 
/* PCI driver glue: binds the probe/remove callbacks to the ID table. */
static struct pci_driver am53c974_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = am53c974_pci_tbl,
	.probe          = pci_esp_probe_one,
	.remove         = pci_esp_remove_one,
};
0529 
/* Generates module init/exit that register/unregister the PCI driver */
module_pci_driver(am53c974_driver);

MODULE_DESCRIPTION("AM53C974 SCSI driver");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Also answer to the name of the older tmscsim driver for this chip */
MODULE_ALIAS("tmscsim");

module_param(am53c974_debug, bool, 0644);
MODULE_PARM_DESC(am53c974_debug, "Enable debugging");

module_param(am53c974_fenab, bool, 0444);
MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");