// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool of free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
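
/*
 * Example: a minimal, hypothetical init-time sketch showing one way a
 * client could size and allocate both rings, unwinding the RX ring if
 * the TX pool fails. my_cl_setup() and the ring sizes are assumed
 * names/values, not part of the ISHTP API; note that each alloc
 * routine above already frees its own ring on failure.
 */
static int my_cl_setup(struct ishtp_cl *cl)
{
	int ret;

	cl->rx_ring_size = 16;	/* illustrative sizes */
	cl->tx_ring_size = 16;

	ret = ishtp_cl_alloc_rx_ring(cl);
	if (ret)
		return ret;

	ret = ishtp_cl_alloc_tx_ring(cl);
	if (ret)
		ishtp_cl_free_rx_ring(cl);	/* TX pool freed itself */

	return ret;
}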

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
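
/*
 * Example: a matching teardown sketch for the setup sketch above.
 * my_cl_teardown() is an assumed name. Both free routines empty their
 * lists under the relevant spinlocks, so calling them after a partial
 * allocation is safe.
 */
static void my_cl_teardown(struct ishtp_cl *cl)
{
	ishtp_cl_free_tx_ring(cl);
	ishtp_cl_free_rx_ring(cl);
}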

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free IO request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success, -EINVAL if @rb is NULL, else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
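
/*
 * Example: a hypothetical helper combining the rb primitives above:
 * init an rb, size its buffer to the firmware client's maximum
 * message length, and release it if the buffer allocation fails.
 * my_alloc_one_rb() is an assumed name.
 */
static struct ishtp_cl_rb *my_alloc_one_rb(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	struct ishtp_cl_rb *rb;

	rb = ishtp_io_rb_init(cl);
	if (!rb)
		return NULL;

	if (ishtp_io_rb_alloc_buf(rb, len)) {
		ishtp_io_rb_free(rb);	/* safe: frees data and rb */
		return NULL;
	}

	return rb;
}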

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success, -EFAULT if @rb is invalid, else an error code
 * from ishtp_cl_read_start()
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If the client has no outstanding flow-control credits, send
	 * flow control now that a free buffer is available again.
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
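
/*
 * Example: a hypothetical RX completion fragment. Once a consumer has
 * copied the payload out of the rb, recycling hands the buffer back to
 * the free list and may restart reading, letting the firmware send
 * again. my_consume_rb() is an assumed name; the copy-out step is
 * elided.
 */
static int my_consume_rb(struct ishtp_cl_rb *rb)
{
	/* ... copy out rb->buffer.data, rb->buf_idx valid bytes ... */

	/* hand the buffer back; may send flow control to the firmware */
	return ishtp_cl_io_rb_recycle(rb);
}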

/**
 * ishtp_cl_tx_empty() - test whether client device TX buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device TX buffer list and check whether it is empty
 *
 * Return: true if client TX buffer list is empty else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
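
/*
 * Example: a hypothetical flush helper that polls ishtp_cl_tx_empty()
 * before suspend or reset so queued TX buffers are not dropped.
 * my_cl_flush_tx(), the retry count and the 10 ms interval are
 * illustrative assumptions; msleep() needs <linux/delay.h>.
 */
static int my_cl_flush_tx(struct ishtp_cl *cl)
{
	int retries = 10;

	while (!ishtp_cl_tx_empty(cl) && retries--)
		msleep(10);

	return ishtp_cl_tx_empty(cl) ? 0 : -EBUSY;
}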
0274 
0275 /**
0276  * ishtp_cl_rx_get_rb() -Get a rb from client device rx buffer list
0277  * @cl: Pointer to client device instance
0278  *
0279  * Check client device in-processing buffer list and get a rb from it.
0280  *
0281  * Return: rb pointer if buffer list isn't empty else NULL
0282  */
0283 struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
0284 {
0285     unsigned long rx_flags;
0286     struct ishtp_cl_rb *rb;
0287 
0288     spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
0289     rb = list_first_entry_or_null(&cl->in_process_list.list,
0290                 struct ishtp_cl_rb, list);
0291     if (rb)
0292         list_del_init(&rb->list);
0293     spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
0294 
0295     return rb;
0296 }
0297 EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
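
/*
 * Example: a hypothetical event callback draining the in-process list.
 * Each rb returned by ishtp_cl_rx_get_rb() has already been unlinked,
 * so it can be processed without holding any lock and must then be
 * recycled by the caller, as in my_consume_rb() above.
 * my_cl_event_cb() is an assumed name.
 */
static void my_cl_event_cb(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL)
		my_consume_rb(rb);	/* process and recycle */
}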