// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *  David Altobelli <david.altobelli@hpe.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
static const struct pci_device_id ilo_blacklist[] = {
    /* auxiliary iLO */
    {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
    /* CL */
    {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
    {}
};

static inline int get_entry_id(int entry)
{
    return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
    return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
    int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
    return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
    return nr_entry << L2_QENTRY_SZ;
}
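
/*
 * Illustrative sketch, not part of the original driver: how a queue
 * entry value round-trips through the helpers above.  A 10-byte packet
 * for descriptor 3 needs two qwords, so the decoded length comes back
 * qword-rounded as 16 bytes.  Wrapped in #if 0 so it is never built.
 */
#if 0
static void example_entry_roundtrip(void)
{
    int entry = mk_entry(3, 10);            /* descriptor id 3, 10-byte payload */

    WARN_ON(get_entry_id(entry) != 3);
    WARN_ON(get_entry_len(entry) != 16);    /* rounded up to 2 qwords */
}
#endif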

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
    struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&hw->fifo_lock, flags);
    if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
          & ENTRY_MASK_O)) {
        fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
                (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
        fifo_q->tail += 1;
        ret = 1;
    }
    spin_unlock_irqrestore(&hw->fifo_lock, flags);

    return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
    struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
    unsigned long flags;
    int ret = 0;
    u64 c;

    spin_lock_irqsave(&hw->fifo_lock, flags);
    c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
    if (c & ENTRY_MASK_C) {
        if (entry)
            *entry = c & ENTRY_MASK_NOSTATE;

        fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
                            (c | ENTRY_MASK) + 1;
        fifo_q->head += 1;
        ret = 1;
    }
    spin_unlock_irqrestore(&hw->fifo_lock, flags);

    return ret;
}

static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
    struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
    unsigned long flags;
    int ret = 0;
    u64 c;

    spin_lock_irqsave(&hw->fifo_lock, flags);
    c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
    if (c & ENTRY_MASK_C)
        ret = 1;
    spin_unlock_irqrestore(&hw->fifo_lock, flags);

    return ret;
}
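
/*
 * Illustrative sketch, not part of the original driver: head and tail
 * above are free-running counters.  Because the ring size is a power
 * of two, masking with imask (nr_entry - 1) is equivalent to taking
 * the index modulo nr_entry, so the counters never need explicit
 * wrapping.  A depth of 16 entries is assumed purely for the example.
 */
#if 0
static void example_fifo_index_wrap(void)
{
    unsigned int nr_entry = 16, imask = nr_entry - 1;
    unsigned int tail = 17;                 /* free-running counter */

    WARN_ON((tail & imask) != (tail % nr_entry));   /* both equal 1 */
}
#endif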

static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
               int dir, int id, int len)
{
    char *fifobar;
    int entry;

    if (dir == SENDQ)
        fifobar = ccb->ccb_u1.send_fifobar;
    else
        fifobar = ccb->ccb_u3.recv_fifobar;

    entry = mk_entry(id, len);
    return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
               int dir, int *id, int *len, void **pkt)
{
    char *fifobar, *desc;
    int entry = 0, pkt_id = 0;
    int ret;

    if (dir == SENDQ) {
        fifobar = ccb->ccb_u1.send_fifobar;
        desc = ccb->ccb_u2.send_desc;
    } else {
        fifobar = ccb->ccb_u3.recv_fifobar;
        desc = ccb->ccb_u4.recv_desc;
    }

    ret = fifo_dequeue(hw, fifobar, &entry);
    if (ret) {
        pkt_id = get_entry_id(entry);
        if (id)
            *id = pkt_id;
        if (len)
            *len = get_entry_len(entry);
        if (pkt)
            *pkt = (void *)(desc + desc_mem_sz(pkt_id));
    }

    return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
    char *fifobar = ccb->ccb_u3.recv_fifobar;

    return fifo_check_recv(hw, fifobar);
}

static inline void doorbell_set(struct ccb *ccb)
{
    iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
    iowrite8(2, ccb->ccb_u5.db_base);
}

static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
    int active = 0, go = 1;
    return l2sz << CTRL_BITPOS_L2SZ |
           idxmask << CTRL_BITPOS_FIFOINDEXMASK |
           desclim << CTRL_BITPOS_DESCLIMIT |
           active << CTRL_BITPOS_A |
           go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
    /* for simplicity, use the same parameters for send and recv ctrls */
    ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
    ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}
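
/*
 * Illustrative sketch, not part of the original driver: the control
 * word ctrl_setup() produces for the default queue geometry, checked
 * field by field.  Bit positions come from hpilo.h; ctrl_set() sets
 * the G (go) bit and leaves the A (active) bit clear until iLO
 * acknowledges the ccb.
 */
#if 0
static void example_ctrl_word(void)
{
    int ctrl = ctrl_set(L2_QENTRY_SZ, NR_QENTRY - 1, NR_QENTRY - 1);

    WARN_ON(!(ctrl & (1 << CTRL_BITPOS_G)));        /* go requested */
    WARN_ON(ctrl & (1 << CTRL_BITPOS_A));           /* not yet active */
}
#endif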

static inline int fifo_sz(int nr_entry)
{
    /* size of a fifo is determined by the number of entries it contains */
    return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}

static void fifo_setup(void *base_addr, int nr_entry)
{
    struct fifo *fifo_q = base_addr;
    int i;

    /* set up an empty fifo */
    fifo_q->head = 0;
    fifo_q->tail = 0;
    fifo_q->reset = 0;
    fifo_q->nrents = nr_entry;
    fifo_q->imask = nr_entry - 1;
    fifo_q->merge = ENTRY_MASK_O;

    for (i = 0; i < nr_entry; i++)
        fifo_q->fifobar[i] = 0;
}

static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
    struct ccb *driver_ccb = &data->driver_ccb;
    struct ccb __iomem *device_ccb = data->mapped_ccb;
    int retries;

    /* complicated dance to tell the hw we are stopping */
    doorbell_clr(driver_ccb);
    iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
          &device_ccb->send_ctrl);
    iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
          &device_ccb->recv_ctrl);

    /* give iLO some time to process stop request */
    for (retries = MAX_WAIT; retries > 0; retries--) {
        doorbell_set(driver_ccb);
        udelay(WAIT_TIME);
        if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
            &&
            !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
            break;
    }
    if (retries == 0)
        dev_err(&pdev->dev, "Closing, but controller still active\n");

    /* clear the hw ccb */
    memset_io(device_ccb, 0, sizeof(struct ccb));

    /* free resources used to back send/recv queues */
    dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
              data->dma_pa);
}

static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
    char *dma_va;
    dma_addr_t dma_pa;
    struct ccb *driver_ccb, *ilo_ccb;

    driver_ccb = &data->driver_ccb;
    ilo_ccb = &data->ilo_ccb;

    data->dma_size = 2 * fifo_sz(NR_QENTRY) +
             2 * desc_mem_sz(NR_QENTRY) +
             ILO_START_ALIGN + ILO_CACHE_SZ;

    data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
                      &data->dma_pa, GFP_ATOMIC);
    if (!data->dma_va)
        return -ENOMEM;

    dma_va = (char *)data->dma_va;
    dma_pa = data->dma_pa;

    dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
    dma_pa = roundup(dma_pa, ILO_START_ALIGN);

    /*
     * Create two ccb's, one with virt addrs, one with phys addrs.
     * Copy the phys addr ccb to device shared mem.
     */
    ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
    ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

    fifo_setup(dma_va, NR_QENTRY);
    driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
    ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
    dma_va += fifo_sz(NR_QENTRY);
    dma_pa += fifo_sz(NR_QENTRY);

    dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
    dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

    fifo_setup(dma_va, NR_QENTRY);
    driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
    ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
    dma_va += fifo_sz(NR_QENTRY);
    dma_pa += fifo_sz(NR_QENTRY);

    driver_ccb->ccb_u2.send_desc = dma_va;
    ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
    dma_pa += desc_mem_sz(NR_QENTRY);
    dma_va += desc_mem_sz(NR_QENTRY);

    driver_ccb->ccb_u4.recv_desc = dma_va;
    ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

    driver_ccb->channel = slot;
    ilo_ccb->channel = slot;

    driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
    ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

    return 0;
}
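
/*
 * Layout sketch (added for clarity, not in the original source): the
 * single coherent DMA block carved up by ilo_ccb_setup() above.
 * Boundaries are approximate because the block start is aligned to
 * ILO_START_ALIGN and the receive fifo to ILO_CACHE_SZ.
 *
 *   +---------------------------+ <- roundup(dma_va, ILO_START_ALIGN)
 *   | send fifo handle + ring   |   fifo_sz(NR_QENTRY) bytes
 *   +---------------------------+ <- roundup(., ILO_CACHE_SZ)
 *   | recv fifo handle + ring   |   fifo_sz(NR_QENTRY) bytes
 *   +---------------------------+
 *   | send packet descriptors   |   desc_mem_sz(NR_QENTRY) bytes
 *   +---------------------------+
 *   | recv packet descriptors   |   desc_mem_sz(NR_QENTRY) bytes
 *   +---------------------------+
 *
 * The ilo_ccb copy holds the bus (physical) addresses of these regions
 * and is the one written to device memory in ilo_ccb_open().
 */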

static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
    int pkt_id, pkt_sz;
    struct ccb *driver_ccb = &data->driver_ccb;

    /* copy the ccb with physical addrs to device memory */
    data->mapped_ccb = (struct ccb __iomem *)
                (hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
    memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

    /* put packets on the send and receive queues */
    pkt_sz = 0;
    for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
        ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
        doorbell_set(driver_ccb);
    }

    pkt_sz = desc_mem_sz(1);
    for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
        ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

    /* the ccb is ready to use */
    doorbell_clr(driver_ccb);
}

static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
    int pkt_id, i;
    struct ccb *driver_ccb = &data->driver_ccb;

    /* make sure iLO is really handling requests */
    for (i = MAX_WAIT; i > 0; i--) {
        if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
            break;
        udelay(WAIT_TIME);
    }

    if (i == 0) {
        dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
        return -EBUSY;
    }

    ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
    doorbell_set(driver_ccb);
    return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
    /* check for this particular channel needing a reset */
    return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
    /* set a flag indicating this channel needs a reset */
    FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
    return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
    return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
    /* check for global reset condition */
    return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
    iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
    /* clear the device (reset bits, pending channel entries) */
    clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
    iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
    iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
         &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
    int slot;

    /*
     * Mapped memory is zeroed on ilo reset, so set a per ccb flag
     * to indicate that this ccb needs to be closed and reopened.
     */
    for (slot = 0; slot < max_ccb; slot++) {
        if (!hw->ccb_alloc[slot])
            continue;
        set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
    }
}

static ssize_t ilo_read(struct file *fp, char __user *buf,
            size_t len, loff_t *off)
{
    int err, found, cnt, pkt_id, pkt_len;
    struct ccb_data *data = fp->private_data;
    struct ccb *driver_ccb = &data->driver_ccb;
    struct ilo_hwinfo *hw = data->ilo_hw;
    void *pkt;

    if (is_channel_reset(driver_ccb)) {
        /*
         * If the device has been reset, applications
         * need to close and reopen all ccbs.
         */
        return -ENODEV;
    }

    /*
     * This function is to be called when data is expected
     * in the channel, and will return an error if no packet is found
     * during the loop below.  The sleep/retry logic is to allow
     * applications to call read() immediately post write(),
     * and give iLO some time to process the sent packet.
     */
    cnt = 20;
    do {
        /* look for a received packet */
        found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
                    &pkt_len, &pkt);
        if (found)
            break;
        cnt--;
        msleep(100);
    } while (!found && cnt);

    if (!found)
        return -EAGAIN;

    /* only copy the length of the received packet */
    if (pkt_len < len)
        len = pkt_len;

    err = copy_to_user(buf, pkt, len);

    /* return the received packet to the queue */
    ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

    return err ? -EFAULT : len;
}

static ssize_t ilo_write(struct file *fp, const char __user *buf,
             size_t len, loff_t *off)
{
    int err, pkt_id, pkt_len;
    struct ccb_data *data = fp->private_data;
    struct ccb *driver_ccb = &data->driver_ccb;
    struct ilo_hwinfo *hw = data->ilo_hw;
    void *pkt;

    if (is_channel_reset(driver_ccb))
        return -ENODEV;

    /* get a packet to send the user command */
    if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
        return -EBUSY;

    /* limit the length to the length of the packet */
    if (pkt_len < len)
        len = pkt_len;

    /* on failure, set the len to 0 to return empty packet to the device */
    err = copy_from_user(pkt, buf, len);
    if (err)
        len = 0;

    /* send the packet */
    ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
    doorbell_set(driver_ccb);

    return err ? -EFAULT : len;
}

static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
    struct ccb_data *data = fp->private_data;
    struct ccb *driver_ccb = &data->driver_ccb;

    poll_wait(fp, &data->ccb_waitq, wait);

    if (is_channel_reset(driver_ccb))
        return EPOLLERR;
    else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
        return EPOLLIN | EPOLLRDNORM;

    return 0;
}

static int ilo_close(struct inode *ip, struct file *fp)
{
    int slot;
    struct ccb_data *data;
    struct ilo_hwinfo *hw;
    unsigned long flags;

    slot = iminor(ip) % max_ccb;
    hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

    spin_lock(&hw->open_lock);

    if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

        data = fp->private_data;

        spin_lock_irqsave(&hw->alloc_lock, flags);
        hw->ccb_alloc[slot] = NULL;
        spin_unlock_irqrestore(&hw->alloc_lock, flags);

        ilo_ccb_close(hw->ilo_dev, data);

        kfree(data);
    } else
        hw->ccb_alloc[slot]->ccb_cnt--;

    spin_unlock(&hw->open_lock);

    return 0;
}

static int ilo_open(struct inode *ip, struct file *fp)
{
    int slot, error;
    struct ccb_data *data;
    struct ilo_hwinfo *hw;
    unsigned long flags;

    slot = iminor(ip) % max_ccb;
    hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

    /* new ccb allocation */
    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    spin_lock(&hw->open_lock);

    /* each fd private_data holds sw/hw view of ccb */
    if (hw->ccb_alloc[slot] == NULL) {
        /* create a channel control block for this minor */
        error = ilo_ccb_setup(hw, data, slot);
        if (error) {
            kfree(data);
            goto out;
        }

        data->ccb_cnt = 1;
        data->ccb_excl = fp->f_flags & O_EXCL;
        data->ilo_hw = hw;
        init_waitqueue_head(&data->ccb_waitq);

        /* write the ccb to hw */
        spin_lock_irqsave(&hw->alloc_lock, flags);
        ilo_ccb_open(hw, data, slot);
        hw->ccb_alloc[slot] = data;
        spin_unlock_irqrestore(&hw->alloc_lock, flags);

        /* make sure the channel is functional */
        error = ilo_ccb_verify(hw, data);
        if (error) {

            spin_lock_irqsave(&hw->alloc_lock, flags);
            hw->ccb_alloc[slot] = NULL;
            spin_unlock_irqrestore(&hw->alloc_lock, flags);

            ilo_ccb_close(hw->ilo_dev, data);

            kfree(data);
            goto out;
        }

    } else {
        kfree(data);
        if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
            /*
             * The channel exists, and either this open
             * or a previous open of this channel wants
             * exclusive access.
             */
            error = -EBUSY;
        } else {
            hw->ccb_alloc[slot]->ccb_cnt++;
            error = 0;
        }
    }
out:
    spin_unlock(&hw->open_lock);

    if (!error)
        fp->private_data = hw->ccb_alloc[slot];

    return error;
}

static const struct file_operations ilo_fops = {
    .owner      = THIS_MODULE,
    .read       = ilo_read,
    .write      = ilo_write,
    .poll       = ilo_poll,
    .open       = ilo_open,
    .release    = ilo_close,
    .llseek     = noop_llseek,
};
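
/*
 * Illustrative userspace sketch, not part of the original driver: a
 * minimal request/response exchange over one channel through the file
 * operations above.  The device path is an assumption (udev typically
 * renders "hpilo!d0ccb2" as /dev/hpilo/d0ccb2) and the "request"
 * payload stands in for a real iLO application protocol.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int example_ilo_exchange(void)
{
    char request[64] = "request", reply[512];
    struct pollfd pfd = { .events = POLLIN };
    int ret = -1;

    pfd.fd = open("/dev/hpilo/d0ccb2", O_RDWR);
    if (pfd.fd < 0)
        return -1;

    /* ilo_write() queues the packet and rings the channel doorbell */
    if (write(pfd.fd, request, sizeof(request)) < 0)
        goto out;

    /* ilo_poll() reports readable once a packet waits on the recv queue */
    if (poll(&pfd, 1, 5000) <= 0)
        goto out;

    /* ilo_read() copies at most one received packet */
    if (read(pfd.fd, reply, sizeof(reply)) < 0)
        goto out;

    ret = 0;
out:
    close(pfd.fd);
    return ret;
}
#endif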

static irqreturn_t ilo_isr(int irq, void *data)
{
    struct ilo_hwinfo *hw = data;
    int pending, i;

    spin_lock(&hw->alloc_lock);

    /* check for ccbs which have data */
    pending = get_device_outbound(hw);
    if (!pending) {
        spin_unlock(&hw->alloc_lock);
        return IRQ_NONE;
    }

    if (is_db_reset(pending)) {
        /* wake up all ccbs if the device was reset */
        pending = -1;
        ilo_set_reset(hw);
    }

    for (i = 0; i < max_ccb; i++) {
        if (!hw->ccb_alloc[i])
            continue;
        if (pending & (1 << i))
            wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
    }

    /* clear the device of the channels that have been handled */
    clear_pending_db(hw, pending);

    spin_unlock(&hw->alloc_lock);

    return IRQ_HANDLED;
}

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
    pci_iounmap(pdev, hw->db_vaddr);
    pci_iounmap(pdev, hw->ram_vaddr);
    pci_iounmap(pdev, hw->mmio_vaddr);
}

static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
    int bar;
    unsigned long off;
    u8 pci_rev_id;
    int rc;

    /* map the memory mapped i/o registers */
    hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
    if (hw->mmio_vaddr == NULL) {
        dev_err(&pdev->dev, "Error mapping mmio\n");
        goto out;
    }

    /* map the adapter shared memory region */
    rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev_id);
    if (rc != 0) {
        dev_err(&pdev->dev, "Error reading PCI rev id: %d\n", rc);
        goto out;
    }

    if (pci_rev_id >= PCI_REV_ID_NECHES) {
        bar = 5;
        /* Last 8k is reserved for CCBs */
        off = pci_resource_len(pdev, bar) - 0x2000;
    } else {
        bar = 2;
        off = 0;
    }
    hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
    if (hw->ram_vaddr == NULL) {
        dev_err(&pdev->dev, "Error mapping shared mem\n");
        goto mmio_free;
    }

    /* map the doorbell aperture */
    hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
    if (hw->db_vaddr == NULL) {
        dev_err(&pdev->dev, "Error mapping doorbell\n");
        goto ram_free;
    }

    return 0;
ram_free:
    pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
    pci_iounmap(pdev, hw->mmio_vaddr);
out:
    return -ENOMEM;
}

static void ilo_remove(struct pci_dev *pdev)
{
    int i, minor;
    struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

    if (!ilo_hw)
        return;

    clear_device(ilo_hw);

    minor = MINOR(ilo_hw->cdev.dev);
    for (i = minor; i < minor + max_ccb; i++)
        device_destroy(ilo_class, MKDEV(ilo_major, i));

    cdev_del(&ilo_hw->cdev);
    ilo_disable_interrupts(ilo_hw);
    free_irq(pdev->irq, ilo_hw);
    ilo_unmap_device(pdev, ilo_hw);
    pci_release_regions(pdev);
    /*
     * pci_disable_device(pdev) used to be here. But this PCI device has
     * two functions with interrupt lines connected to a single pin. The
     * other one is a USB host controller. So when we disable the PIN here
     * e.g. by rmmod hpilo, the controller stops working. It is because
     * the interrupt link is disabled in ACPI since it is not refcounted
     * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
     */
    kfree(ilo_hw);
    ilo_hwdev[(minor / max_ccb)] = 0;
}

static int ilo_probe(struct pci_dev *pdev,
                   const struct pci_device_id *ent)
{
    int devnum, minor, start, error = 0;
    struct ilo_hwinfo *ilo_hw;

    if (pci_match_id(ilo_blacklist, pdev)) {
        dev_dbg(&pdev->dev, "Not supported on this device\n");
        return -ENODEV;
    }

    if (max_ccb > MAX_CCB)
        max_ccb = MAX_CCB;
    else if (max_ccb < MIN_CCB)
        max_ccb = MIN_CCB;

    /* find a free range for device files */
    for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
        if (ilo_hwdev[devnum] == 0) {
            ilo_hwdev[devnum] = 1;
            break;
        }
    }

    if (devnum == MAX_ILO_DEV) {
        dev_err(&pdev->dev, "Error finding free device\n");
        return -ENODEV;
    }

    /* track global allocations for this device */
    error = -ENOMEM;
    ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
    if (!ilo_hw)
        goto out;

    ilo_hw->ilo_dev = pdev;
    spin_lock_init(&ilo_hw->alloc_lock);
    spin_lock_init(&ilo_hw->fifo_lock);
    spin_lock_init(&ilo_hw->open_lock);

    error = pci_enable_device(pdev);
    if (error)
        goto free;

    pci_set_master(pdev);

    error = pci_request_regions(pdev, ILO_NAME);
    if (error)
        goto disable;

    error = ilo_map_device(pdev, ilo_hw);
    if (error)
        goto free_regions;

    pci_set_drvdata(pdev, ilo_hw);
    clear_device(ilo_hw);

    error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
    if (error)
        goto unmap;

    ilo_enable_interrupts(ilo_hw);

    cdev_init(&ilo_hw->cdev, &ilo_fops);
    ilo_hw->cdev.owner = THIS_MODULE;
    start = devnum * max_ccb;
    error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
    if (error) {
        dev_err(&pdev->dev, "Could not add cdev\n");
        goto remove_isr;
    }

    for (minor = 0 ; minor < max_ccb; minor++) {
        struct device *dev;
        dev = device_create(ilo_class, &pdev->dev,
                    MKDEV(ilo_major, minor), NULL,
                    "hpilo!d%dccb%d", devnum, minor);
        if (IS_ERR(dev))
            dev_err(&pdev->dev, "Could not create files\n");
    }

    return 0;
remove_isr:
    ilo_disable_interrupts(ilo_hw);
    free_irq(pdev->irq, ilo_hw);
unmap:
    ilo_unmap_device(pdev, ilo_hw);
free_regions:
    pci_release_regions(pdev);
disable:
/*  pci_disable_device(pdev);  see comment in ilo_remove */
free:
    kfree(ilo_hw);
out:
    ilo_hwdev[devnum] = 0;
    return error;
}

static const struct pci_device_id ilo_devices[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
    { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
    { }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
    .name     = ILO_NAME,
    .id_table = ilo_devices,
    .probe    = ilo_probe,
    .remove   = ilo_remove,
};

static int __init ilo_init(void)
{
    int error;
    dev_t dev;

    ilo_class = class_create(THIS_MODULE, "iLO");
    if (IS_ERR(ilo_class)) {
        error = PTR_ERR(ilo_class);
        goto out;
    }

    error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
    if (error)
        goto class_destroy;

    ilo_major = MAJOR(dev);

    error = pci_register_driver(&ilo_driver);
    if (error)
        goto chr_remove;

    return 0;
chr_remove:
    unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
    class_destroy(ilo_class);
out:
    return error;
}

static void __exit ilo_exit(void)
{
    pci_unregister_driver(&ilo_driver);
    unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
    class_destroy(ilo_class);
}

MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
MODULE_LICENSE("GPL v2");

module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");

module_init(ilo_init);
module_exit(ilo_exit);