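/*
 * ISHTP Ring Buffers
 *
 * Allocation, recycling and freeing of the per-client RX and TX
 * ring buffers used by the ISHTP transport.
 */
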
#include <linux/slab.h>
#include "client.h"
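
/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */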
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret) {
			/*
			 * rb is not yet on free_rb_list, so the cleanup
			 * path below cannot reach it; free it here to
			 * avoid leaking it.
			 */
			ishtp_io_rb_free(rb);
			goto out;
		}
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}
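
/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */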
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool of free Tx buffers */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
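
/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free all RX ring buffers, both those waiting on the free list and
 * those still queued for processing
 */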
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* Release allocated memory: first drain free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/* ...then drain in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}
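
/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free all TX ring buffers, both those on the free list and those
 * still queued for sending
 */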
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	/* Release allocated memory: first drain tx_free_list */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	/* ...then drain tx_list of buffers queued for sending */
	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
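
/**
 * ishtp_io_rb_free() - Free an RB (request block)
 * @rb: request block to free; NULL is silently ignored
 *
 * Free the RB's data buffer and the RB itself
 */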
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (!rb)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}
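
/**
 * ishtp_io_rb_init() - Allocate and initialize an RB (request block)
 * @cl: client device instance the RB belongs to
 *
 * The data buffer is not allocated here; see ishtp_io_rb_alloc_buf().
 *
 * Return: the new RB, or NULL on allocation failure
 */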
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}
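
/**
 * ishtp_io_rb_alloc_buf() - Allocate the data buffer for an RB
 * @rb: request block to attach the buffer to
 * @length: buffer size in bytes; 0 is treated as a no-op success
 *
 * Return: 0 on success, -EINVAL for a NULL RB, -ENOMEM on allocation
 * failure
 */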
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
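
/**
 * ishtp_cl_io_rb_recycle() - Return a consumed RB to the free list
 * @cl_rb: request block to recycle
 *
 * Called by a client driver once it has processed an RX buffer, so the
 * buffer can be reused for further reads.
 *
 * Return: -EFAULT for a NULL RB or client, otherwise 0 or the result of
 * restarting the read
 */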
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If the client is out of flow-control credits, reading may have
	 * stalled for want of a free buffer; now that one is available,
	 * try to restart reading.
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
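
/**
 * ishtp_cl_tx_empty() - Test whether the client's TX queue is empty
 * @cl: client device instance
 *
 * Return: true if the TX queue is empty, false otherwise
 */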
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
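
/**
 * ishtp_cl_rx_get_rb() - Detach the first RB from the in_process list
 * @cl: client device instance
 *
 * Return: the first queued RX buffer, or NULL if the list is empty
 */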
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
	unsigned long rx_flags;
	struct ishtp_cl_rb *rb;

	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
	rb = list_first_entry_or_null(&cl->in_process_list.list,
				      struct ishtp_cl_rb, list);
	if (rb)
		list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

	return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);