// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_io.h"

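/*
 * Pool of pre-allocated SCSI IO tracking objects. All EFCT_NUM_SCSI_IOS
 * entries are created up front; free entries sit on the freelist and are
 * handed out and returned under the pool lock.
 */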
struct efct_io_pool {
    struct efct *efct;
    spinlock_t lock;    /* IO pool lock */
    u32 io_num_ios;     /* Total IOs allocated */
    struct efct_io *ios[EFCT_NUM_SCSI_IOS];
    struct list_head freelist;
};

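/*
 * Create the IO pool. For each of the EFCT_NUM_SCSI_IOS slots an efct_io is
 * allocated along with a DMA-coherent response buffer and an array of
 * num_sgl SGL entries, and the IO is added to the pool's free list. If a
 * response buffer or SGL allocation fails, the partially built pool is
 * released with efct_io_pool_free() and NULL is returned.
 */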
struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl)
{
    u32 i = 0;
    struct efct_io_pool *io_pool;
    struct efct_io *io;

    /* Allocate the IO pool */
    io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
    if (!io_pool)
        return NULL;

    io_pool->efct = efct;
    INIT_LIST_HEAD(&io_pool->freelist);
    /* initialize IO pool lock */
    spin_lock_init(&io_pool->lock);

    for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
        io = kzalloc(sizeof(*io), GFP_KERNEL);
        if (!io)
            break;

        io_pool->io_num_ios++;
        io_pool->ios[i] = io;
        io->tag = i;
        io->instance_index = i;

        /* Allocate a response buffer */
        io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
        io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
                             io->rspbuf.size,
                             &io->rspbuf.phys, GFP_KERNEL);
        if (!io->rspbuf.virt) {
            efc_log_err(efct, "dma_alloc rspbuf failed\n");
            efct_io_pool_free(io_pool);
            return NULL;
        }

        /* Allocate SGL */
        io->sgl = kzalloc(sizeof(*io->sgl) * num_sgl, GFP_KERNEL);
        if (!io->sgl) {
            efct_io_pool_free(io_pool);
            return NULL;
        }

        io->sgl_allocated = num_sgl;
        io->sgl_count = 0;

        INIT_LIST_HEAD(&io->list_entry);
        list_add_tail(&io->list_entry, &io_pool->freelist);
    }

    return io_pool;
}

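/*
 * Destroy an IO pool. Each allocated IO has its SGL array and DMA-coherent
 * response buffer released before the pool itself is freed and the
 * transport's io_pool pointer is cleared.
 */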
int
efct_io_pool_free(struct efct_io_pool *io_pool)
{
    struct efct *efct;
    u32 i;
    struct efct_io *io;

    if (io_pool) {
        efct = io_pool->efct;

        for (i = 0; i < io_pool->io_num_ios; i++) {
            io = io_pool->ios[i];
            if (!io)
                continue;

            kfree(io->sgl);
            dma_free_coherent(&efct->pci->dev,
                      io->rspbuf.size, io->rspbuf.virt,
                      io->rspbuf.phys);
            memset(&io->rspbuf, 0, sizeof(struct efc_dma));
            /* release the efct_io allocated in efct_io_pool_create() */
            kfree(io);
        }

        kfree(io_pool);
        efct->xport->io_pool = NULL;
    }

    return 0;
}

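/*
 * Allocate an IO from the pool. The first entry on the free list is removed
 * under the pool lock and its per-command state is reset to defaults; NULL
 * is returned if the free list is empty. The transport's io_active_count and
 * io_total_alloc counters are incremented on success.
 */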
struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
{
    struct efct_io *io = NULL;
    struct efct *efct;
    unsigned long flags = 0;

    efct = io_pool->efct;

    spin_lock_irqsave(&io_pool->lock, flags);

    if (!list_empty(&io_pool->freelist)) {
        io = list_first_entry(&io_pool->freelist, struct efct_io,
                      list_entry);
        list_del_init(&io->list_entry);
    }

    spin_unlock_irqrestore(&io_pool->lock, flags);

    if (!io)
        return NULL;

    io->io_type = EFCT_IO_TYPE_MAX;
    io->hio_type = EFCT_HW_IO_MAX;
    io->hio = NULL;
    io->transferred = 0;
    io->efct = efct;
    io->timeout = 0;
    io->sgl_count = 0;
    io->tgt_task_tag = 0;
    io->init_task_tag = 0;
    io->hw_tag = 0;
    io->display_name = "pending";
    io->seq_init = 0;
    io->io_free = 0;
    io->release = NULL;
    atomic_add_return(1, &efct->xport->io_active_count);
    atomic_add_return(1, &efct->xport->io_total_alloc);
    return io;
}

/* Free an object used to track an IO */
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
{
    struct efct *efct;
    struct efct_hw_io *hio = NULL;
    unsigned long flags = 0;

    efct = io_pool->efct;

    spin_lock_irqsave(&io_pool->lock, flags);
    hio = io->hio;
    io->hio = NULL;
    io->io_free = 1;
    INIT_LIST_HEAD(&io->list_entry);
    list_add(&io->list_entry, &io_pool->freelist);
    spin_unlock_irqrestore(&io_pool->lock, flags);

    if (hio)
        efct_hw_io_free(&efct->hw, hio);

    atomic_sub_return(1, &efct->xport->io_active_count);
    atomic_add_return(1, &efct->xport->io_total_free);
}

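/*
 * Usage sketch (illustrative only, not part of the driver): callers pair
 * efct_io_pool_io_alloc() with efct_io_pool_io_free(); the error handling
 * shown below is an assumption for the example.
 *
 *    io = efct_io_pool_io_alloc(efct->xport->io_pool);
 *    if (!io)
 *        return -ENOMEM;    // free list exhausted
 *
 *    // ...set up io->sgl, tags and timeout, then issue the command...
 *
 *    efct_io_pool_io_free(efct->xport->io_pool, io);
 */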
/* Find an I/O given its node and ox_id */
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
            u16 ox_id, u16 rx_id)
{
    struct efct_io *io = NULL;
    unsigned long flags = 0;
    u8 found = false;

    spin_lock_irqsave(&node->active_ios_lock, flags);
    list_for_each_entry(io, &node->active_ios, list_entry) {
        if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
            (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
            if (kref_get_unless_zero(&io->ref))
                found = true;
            break;
        }
    }
    spin_unlock_irqrestore(&node->active_ios_lock, flags);
    return found ? io : NULL;
}
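
/*
 * Usage sketch (illustrative only, not part of the driver):
 * efct_io_find_tgt_io() returns the matching IO with a reference taken via
 * kref_get_unless_zero(), so the caller is expected to drop that reference
 * when done; the kref_put() release callback shown is an assumption.
 *
 *    io = efct_io_find_tgt_io(efct, node, ox_id, 0xffff);
 *    if (io) {
 *        // ...handle the exchange identified by ox_id...
 *        kref_put(&io->ref, io->release);
 *    }
 *
 * Passing 0xffff as rx_id matches an IO regardless of its tgt_task_tag.
 */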