Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* pci_fire.c: Sun4u platform PCI-E controller support.
0003  *
0004  * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
0005  */
0006 #include <linux/kernel.h>
0007 #include <linux/pci.h>
0008 #include <linux/slab.h>
0009 #include <linux/init.h>
0010 #include <linux/msi.h>
0011 #include <linux/export.h>
0012 #include <linux/irq.h>
0013 #include <linux/of_device.h>
0014 #include <linux/numa.h>
0015 
0016 #include <asm/prom.h>
0017 #include <asm/irq.h>
0018 #include <asm/upa.h>
0019 
0020 #include "pci_impl.h"
0021 
0022 #define DRIVER_NAME "fire"
0023 #define PFX     DRIVER_NAME ": "
0024 
0025 #define FIRE_IOMMU_CONTROL  0x40000UL
0026 #define FIRE_IOMMU_TSBBASE  0x40008UL
0027 #define FIRE_IOMMU_FLUSH    0x40100UL
0028 #define FIRE_IOMMU_FLUSHINV 0x40108UL
0029 
/* Set up and enable the Fire IOMMU for one PBM.
 *
 * Programs the IOMMU register block (offsets FIRE_IOMMU_* from
 * pbm->pbm_regs), allocates the translation table via
 * iommu_table_init(), and then enables translation.
 *
 * Returns 0 on success, or the negative error code propagated from
 * iommu_table_init().
 */
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size.  */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses. */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

	/* Low bits of the TSB base register carry extra config;
	 * 0x7 is OR'ed into the physical address here -- presumably a
	 * size/mode encoding, see the Fire programming manual.
	 */
	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	/* Finally enable translation; the control register must be
	 * written only after the TSB base is in place.
	 */
	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */ |
		    0x00000300 /* Cache mode */		    |
		    0x00000002 /* Bypass enable */	    |
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}
0075 
0076 #ifdef CONFIG_PCI_MSI
/* One hardware MSI event queue entry as written by Fire.
 * word0/word1 bitfield layouts are described by the MSIQ_WORD* masks
 * and shifts below; resv[] pads the entry to 64 bytes.
 */
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

/* Entry types found in the upper bits of the FMT_TYPE field. */
#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};
0103 
0104 /* All MSI registers are offset from pbm->pbm_regs */
0105 #define EVENT_QUEUE_BASE_ADDR_REG   0x010000UL
0106 #define  EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
0107 
0108 #define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
0109 #define  EVENT_QUEUE_CONTROL_SET_OFLOW  0x0200000000000000UL
0110 #define  EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
0111 
0112 #define EVENT_QUEUE_CONTROL_CLEAR(EQ)   (0x011200UL + (EQ) * 0x8UL)
0113 #define  EVENT_QUEUE_CONTROL_CLEAR_OF   0x0200000000000000UL
0114 #define  EVENT_QUEUE_CONTROL_CLEAR_E2I  0x0000800000000000UL
0115 #define  EVENT_QUEUE_CONTROL_CLEAR_DIS  0x0000100000000000UL
0116 
0117 #define EVENT_QUEUE_STATE(EQ)       (0x011400UL + (EQ) * 0x8UL)
0118 #define  EVENT_QUEUE_STATE_MASK     0x0000000000000007UL
0119 #define  EVENT_QUEUE_STATE_IDLE     0x0000000000000001UL
0120 #define  EVENT_QUEUE_STATE_ACTIVE   0x0000000000000002UL
0121 #define  EVENT_QUEUE_STATE_ERROR    0x0000000000000004UL
0122 
0123 #define EVENT_QUEUE_TAIL(EQ)        (0x011600UL + (EQ) * 0x8UL)
0124 #define  EVENT_QUEUE_TAIL_OFLOW     0x0200000000000000UL
0125 #define  EVENT_QUEUE_TAIL_VAL       0x000000000000007fUL
0126 
0127 #define EVENT_QUEUE_HEAD(EQ)        (0x011800UL + (EQ) * 0x8UL)
0128 #define  EVENT_QUEUE_HEAD_VAL       0x000000000000007fUL
0129 
0130 #define MSI_MAP(MSI)            (0x020000UL + (MSI) * 0x8UL)
0131 #define  MSI_MAP_VALID          0x8000000000000000UL
0132 #define  MSI_MAP_EQWR_N         0x4000000000000000UL
0133 #define  MSI_MAP_EQNUM          0x000000000000003fUL
0134 
0135 #define MSI_CLEAR(MSI)          (0x028000UL + (MSI) * 0x8UL)
0136 #define  MSI_CLEAR_EQWR_N       0x4000000000000000UL
0137 
0138 #define IMONDO_DATA0            0x02C000UL
0139 #define  IMONDO_DATA0_DATA      0xffffffffffffffc0UL
0140 
0141 #define IMONDO_DATA1            0x02C008UL
0142 #define  IMONDO_DATA1_DATA      0xffffffffffffffffUL
0143 
0144 #define MSI_32BIT_ADDR          0x034000UL
0145 #define  MSI_32BIT_ADDR_VAL     0x00000000ffff0000UL
0146 
0147 #define MSI_64BIT_ADDR          0x034008UL
0148 #define  MSI_64BIT_ADDR_VAL     0xffffffffffff0000UL
0149 
0150 static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
0151                  unsigned long *head)
0152 {
0153     *head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
0154     return 0;
0155 }
0156 
/* Pull one MSI off event queue @msiqid at ring index *@head.
 *
 * Returns 0 when the entry at *head is empty (FMT/TYPE field clear),
 * -EINVAL when the entry is not an MSI32/MSI64 type, or 1 on success
 * with *msi set to the dequeued MSI number and *head advanced (with
 * wrap-around) past the consumed entry.
 */
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	/* Each queue occupies 8192 bytes of the msi_queues area. */
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	/* A clear FMT/TYPE field marks an unused/already-consumed slot. */
	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	/* Upper bits of FMT_TYPE carry the entry type. */
	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	/* The MSI number lives in the DATA0 field of word0. */
	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	/* Ack the MSI in the hardware clear register before releasing
	 * the queue slot.
	 */
	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry.  */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring.  */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}
0191 
0192 static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
0193                  unsigned long head)
0194 {
0195     upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
0196     return 0;
0197 }
0198 
/* Bind MSI number @msi to event queue @msiqid and mark the mapping
 * valid.  The EQ number is programmed before the VALID bit is set.
 * @is_msi64 is part of the sparc64_msiq_ops interface but is unused
 * by this implementation.  Always returns 0.
 */
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	/* Select the target event queue for this MSI. */
	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	/* Clear any latched state for this MSI. */
	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	/* Now turn the mapping on. */
	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}
0217 
0218 static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
0219 {
0220     u64 val;
0221 
0222     val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
0223 
0224     val &= ~MSI_MAP_VALID;
0225 
0226     upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
0227 
0228     return 0;
0229 }
0230 
0231 static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
0232 {
0233     unsigned long pages, order, i;
0234 
0235     order = get_order(512 * 1024);
0236     pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
0237     if (pages == 0UL) {
0238         printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
0239                order);
0240         return -ENOMEM;
0241     }
0242     memset((char *)pages, 0, PAGE_SIZE << order);
0243     pbm->msi_queues = (void *) pages;
0244 
0245     upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
0246             __pa(pbm->msi_queues)),
0247            pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);
0248 
0249     upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
0250     upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);
0251 
0252     upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
0253     upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);
0254 
0255     for (i = 0; i < pbm->msiq_num; i++) {
0256         upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
0257         upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
0258     }
0259 
0260     return 0;
0261 }
0262 
0263 static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
0264 {
0265     unsigned long pages, order;
0266 
0267     order = get_order(512 * 1024);
0268     pages = (unsigned long) pbm->msi_queues;
0269 
0270     free_pages(pages, order);
0271 
0272     pbm->msi_queues = NULL;
0273 }
0274 
/* Build a Linux IRQ for interrupt vector @devino servicing event
 * queue @msiqid, then enable the queue.
 *
 * Returns the IRQ number on success, or -ENOMEM if build_irq() fails.
 */
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int irq;
	int fixup;
	u64 val;

	/* Per-devino interrupt map and clear registers. */
	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	/* Set the valid bit (bit 63) and route to the chosen
	 * interrupt controller.
	 */
	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

	/* Fixup value combines the port ID and devino, adjusted for
	 * the controller bit set in the IMAP above.
	 */
	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!irq)
		return -ENOMEM;

	/* Turn the event queue on now that its IRQ exists. */
	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return irq;
}
0306 
/* Dispatch table handed to the generic sparc64 MSI layer (see
 * pci_fire_msi_init below) to drive Fire's MSI event queues.
 */
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	=	pci_fire_get_head,
	.dequeue_msi	=	pci_fire_dequeue_msi,
	.set_head	=	pci_fire_set_head,
	.msi_setup	=	pci_fire_msi_setup,
	.msi_teardown	=	pci_fire_msi_teardown,
	.msiq_alloc	=	pci_fire_msiq_alloc,
	.msiq_free	=	pci_fire_msiq_free,
	.msiq_build_irq	=	pci_fire_msiq_build_irq,
};
0317 
/* Hook this PBM's MSI event queue ops into the generic sparc64 MSI core. */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
0322 #else /* CONFIG_PCI_MSI */
/* No-op stub when the kernel is built without CONFIG_PCI_MSI. */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
0326 #endif /* !(CONFIG_PCI_MSI) */
0327 
0328 /* Based at pbm->controller_regs */
0329 #define FIRE_PARITY_CONTROL 0x470010UL
0330 #define  FIRE_PARITY_ENAB   0x8000000000000000UL
0331 #define FIRE_FATAL_RESET_CTL    0x471028UL
0332 #define  FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
0333 #define  FIRE_FATAL_RESET_MB    0x0000000002000000UL
0334 #define  FIRE_FATAL_RESET_CPE   0x0000000000008000UL
0335 #define  FIRE_FATAL_RESET_APE   0x0000000000004000UL
0336 #define  FIRE_FATAL_RESET_PIO   0x0000000000000040UL
0337 #define  FIRE_FATAL_RESET_JW    0x0000000000000004UL
0338 #define  FIRE_FATAL_RESET_JI    0x0000000000000002UL
0339 #define  FIRE_FATAL_RESET_JR    0x0000000000000001UL
0340 #define FIRE_CORE_INTR_ENABLE   0x471800UL
0341 
0342 /* Based at pbm->pbm_regs */
0343 #define FIRE_TLU_CTRL       0x80000UL
0344 #define  FIRE_TLU_CTRL_TIM  0x00000000da000000UL
0345 #define  FIRE_TLU_CTRL_QDET 0x0000000000000100UL
0346 #define  FIRE_TLU_CTRL_CFG  0x0000000000000001UL
0347 #define FIRE_TLU_DEV_CTRL   0x90008UL
0348 #define FIRE_TLU_LINK_CTRL  0x90020UL
0349 #define FIRE_TLU_LINK_CTRL_CLK  0x0000000000000040UL
0350 #define FIRE_LPU_RESET      0xe2008UL
0351 #define FIRE_LPU_LLCFG      0xe2200UL
0352 #define  FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
0353 #define FIRE_LPU_FCTRL_UCTRL    0xe2240UL
0354 #define  FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
0355 #define  FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
0356 #define FIRE_LPU_TXL_FIFOP  0xe2430UL
0357 #define FIRE_LPU_LTSSM_CFG2 0xe2788UL
0358 #define FIRE_LPU_LTSSM_CFG3 0xe2790UL
0359 #define FIRE_LPU_LTSSM_CFG4 0xe2798UL
0360 #define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
0361 #define FIRE_DMC_IENAB      0x31800UL
0362 #define FIRE_DMC_DBG_SEL_A  0x53000UL
0363 #define FIRE_DMC_DBG_SEL_B  0x53008UL
0364 #define FIRE_PEC_IENAB      0x51800UL
0365 
/* One-time bring-up of the Fire controller for this PBM: enable
 * parity checking, select which errors trigger a fatal reset, unmask
 * the core/DMC/PEC interrupt sources, and configure the TLU
 * (transaction layer) and LPU (link/LTSSM) register blocks.
 */
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	/* Error conditions that should cause a fatal reset. */
	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	/* Enable all core interrupt sources. */
	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	/* Transaction layer unit (TLU) configuration. */
	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	/* Link physical unit (LPU): release reset, then program
	 * link/flow-control and LTSSM configuration registers.  The
	 * magic values presumably come from the Fire programming
	 * manual -- do not change without that reference.
	 */
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	/* Unmask DMC and PEC interrupts; clear the debug selects. */
	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
0412 
0413 static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
0414                  struct platform_device *op, u32 portid)
0415 {
0416     const struct linux_prom64_registers *regs;
0417     struct device_node *dp = op->dev.of_node;
0418     int err;
0419 
0420     pbm->numa_node = NUMA_NO_NODE;
0421 
0422     pbm->pci_ops = &sun4u_pci_ops;
0423     pbm->config_space_reg_bits = 12;
0424 
0425     pbm->index = pci_num_pbms++;
0426 
0427     pbm->portid = portid;
0428     pbm->op = op;
0429     pbm->name = dp->full_name;
0430 
0431     regs = of_get_property(dp, "reg", NULL);
0432     pbm->pbm_regs = regs[0].phys_addr;
0433     pbm->controller_regs = regs[1].phys_addr - 0x410000UL;
0434 
0435     printk("%s: SUN4U PCIE Bus Module\n", pbm->name);
0436 
0437     pci_determine_mem_io_space(pbm);
0438 
0439     pci_get_pbm_props(pbm);
0440 
0441     pci_fire_hw_init(pbm);
0442 
0443     err = pci_fire_pbm_iommu_init(pbm);
0444     if (err)
0445         return err;
0446 
0447     pci_fire_msi_init(pbm);
0448 
0449     pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);
0450 
0451     /* XXX register error interrupt handlers XXX */
0452 
0453     pbm->next = pci_pbm_root;
0454     pci_pbm_root = pbm;
0455 
0456     return 0;
0457 }
0458 
0459 static int fire_probe(struct platform_device *op)
0460 {
0461     struct device_node *dp = op->dev.of_node;
0462     struct pci_pbm_info *pbm;
0463     struct iommu *iommu;
0464     u32 portid;
0465     int err;
0466 
0467     portid = of_getintprop_default(dp, "portid", 0xff);
0468 
0469     err = -ENOMEM;
0470     pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
0471     if (!pbm) {
0472         printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
0473         goto out_err;
0474     }
0475 
0476     iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
0477     if (!iommu) {
0478         printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
0479         goto out_free_controller;
0480     }
0481 
0482     pbm->iommu = iommu;
0483 
0484     err = pci_fire_pbm_init(pbm, op, portid);
0485     if (err)
0486         goto out_free_iommu;
0487 
0488     dev_set_drvdata(&op->dev, pbm);
0489 
0490     return 0;
0491 
0492 out_free_iommu:
0493     kfree(pbm->iommu);
0494             
0495 out_free_controller:
0496     kfree(pbm);
0497 
0498 out_err:
0499     return err;
0500 }
0501 
/* Device-tree match table: the Fire PCI-E node has name "pci" and
 * compatible "pciex108e,80f0".
 */
static const struct of_device_id fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};
0509 
/* Platform driver binding fire_probe() to matching device-tree nodes.
 * No .remove: the host controller is never unbound once probed.
 */
static struct platform_driver fire_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = fire_match,
	},
	.probe		= fire_probe,
};
0517 
/* Register the driver at subsys initcall time, before ordinary
 * device initcalls run.
 */
static int __init fire_init(void)
{
	return platform_driver_register(&fire_driver);
}

subsys_initcall(fire_init);