/*
 * Calxeda Highbank AHCI SATA platform driver
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>

#include "ahci.h"

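/*
 * The SATA combo PHYs are reached through an indirect window: CPHY_MAP()
 * selects the PHY device and the upper address bits via the map register,
 * CPHY_ADDR() gives the register's offset within the mapped window, and the
 * SERDES_CR_* registers add a second level of indirection for the SerDes
 * control registers used by combo_phy_read()/combo_phy_write() below.
 */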
#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL 0x80a0
#define SERDES_CR_ADDR 0x80a1
#define SERDES_CR_DATA 0x80a2
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002
#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
#define CPHY_SATA_TX_OVERRIDE 0x8000
#define CPHY_SATA_RX_OVERRIDE 0x4000
#define CPHY_TX_OVERRIDE 0x2004
#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
#define CPHY_SATA_TX_ATTEN 0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);

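/*
 * Per-port PHY bookkeeping: each SATA port is tied to a combo PHY
 * (phy_base/phy_devs), a lane within that PHY, and an optional transmit
 * attenuation override taken from the device tree.
 */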
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK 0
#define SLOAD 1
#define SDATA 2
#define SGPIO_PINS 3
#define SGPIO_PORTS 8

struct ecx_plat_data {
	u32 n_ports;
	/* extra clock pulses sent before and after each SGPIO pattern */
	u32 pre_clocks;
	u32 post_clocks;
	struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
	u32 sgpio_pattern;
	u32 port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
#define ECX_FAULT_SHIFT 2
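/*
 * Each port drives three consecutive bits (activity, locate, fault) in the
 * serialized SGPIO pattern; port_to_sgpio[] gives the port's position in
 * that bit stream.
 */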
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}

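/* Generate one clock pulse on the bit-banged SGPIO clock line. */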
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
	udelay(50);
	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
	udelay(50);
}

static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* pulse LOAD to mark the start of a new pattern */
	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);

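	/*
	 * Shift the pattern out, three bits (activity, locate, fault) per
	 * port, least significant bit first.
	 */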
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off the new LED state for this port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}

static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;

	for (i = 0; i < SGPIO_PINS; i++) {
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
					     GPIOD_OUT_HIGH);
		if (IS_ERR(gpiod)) {
			dev_err(dev, "failed to get GPIO %d\n", i);
			continue;
		}
		gpiod_set_consumer_name(gpiod, "CX SGPIO");

		pdata->sgpio_gpiod[i] = gpiod;
	}
	of_property_read_u32_array(np, "calxeda,led-order",
				   pdata->port_to_sgpio,
				   pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				 &pdata->post_clocks))
		pdata->post_clocks = 0;

	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

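/*
 * Low-level combo PHY accessors: program the map register at offset 0x800
 * under cphy_lock, then read or write the target register through the
 * mapped window.
 */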
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}

static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

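/*
 * Force the receive DPLL mode for this lane: enable the RX override,
 * program the requested DPLL mode, then pulse DPLL reset and give the PHY
 * time to settle before the link is retried.
 */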
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}

static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
					lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				   tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}

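/*
 * Hard reset with a PHY workaround: the lane overrides are dropped before
 * each reset attempt and re-applied afterwards, and the reset is retried
 * (up to 100 times) until the link comes up or SStatus reports that no
 * device is attached.
 */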
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				   unsigned long deadline)
{
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	hpriv->stop_engine(ap);

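	/* clear D2H reception area to properly wait for D2H FIS */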
	ata_tf_init(link->device, &tf);
	tf.status = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

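		/* stop retrying if SStatus can't be read or no device is detected */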
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	if (!irq)
		return -EINVAL;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->irq = irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv);

	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

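	/*
	 * CAP.NP is not always reliable (it may hold either the index of the
	 * last enabled port or the port count), so size the host using the
	 * larger of CAP.NP and the highest bit set in the port map.
	 */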
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

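	/*
	 * Mask host interrupts before suspending; the AHCI spec requires
	 * software to disable interrupts before putting the HBA into a
	 * low-power state.
	 */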
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	ata_host_suspend(host, PMSG_SUSPEND);
	return 0;
}

static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			 ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");