// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

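/* do_mmap_info - set up a queue buffer for mmap by user space
 *
 * If @outbuf is non-NULL the caller wants the queue mapped into user
 * space: create the mmap info for @buf, copy its offset/size description
 * out to @outbuf, and add it to the device's pending_mmaps list so a
 * later mmap() call can find it.  On success *ip_p holds the new mmap
 * info, or NULL for a kernel-only queue.
 */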
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (IS_ERR(ip)) {
			err = PTR_ERR(ip);
			goto err1;
		}

		if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
			err = -EFAULT;
			goto err2;
		}

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return err;
}

inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue buffer is a rxe_queue_buf header followed by the
	 * element slots (see struct rxe_queue_buf in rxe_queue.h);
	 * buf->data points at the slots, which are all that is cleared
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

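/* rxe_queue_init - allocate and initialize a queue
 *
 * Pads the element size up to a power of two of at least one cache line
 * and rounds the slot count up to a power of two so that indices can
 * wrap with a simple AND against index_mask.  One slot always stays
 * empty to tell a full queue from an empty one, and the usable element
 * count is passed back through *num_elem.
 */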
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
				 unsigned int elem_size, enum queue_type type)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;
	q->type = type;

	/* remember the unpadded element size; resize_finish() copies
	 * this many bytes per element
	 */
	q->elem_size = elem_size;

	/* pad element up to cache line size */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}
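
/*
 * Worked example of the rounding above (illustrative only, assuming a
 * 64-byte cache line): a caller asking for *num_elem = 100 elements of
 * elem_size = 24 gets elem_size padded to 64, num_slots =
 * roundup_pow_of_two(101) = 128, index_mask = 127, a buffer of
 * sizeof(struct rxe_queue_buf) + 128 * 64 bytes, and *num_elem
 * returned as 127.
 */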

/* resize_finish - copy live elements from q into new_q
 *
 * Moves one element at a time from the old buffer to the new one, then
 * swaps the two rxe_queue headers so that q refers to the resized
 * queue.  Fails with -EINVAL if the elements already queued do not fit
 * in the new size.  Called from rxe_queue_resize() with the queue
 * locks held.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	enum queue_type type = q->type;
	u32 prod;
	u32 cons;

	if (!queue_empty(q, type) && (num_elem < queue_count(q, type)))
		return -EINVAL;

	prod = queue_get_producer(new_q, type);
	cons = queue_get_consumer(q, type);

	while (!queue_empty(q, type)) {
		memcpy(queue_addr_from_index(new_q, prod),
		       queue_addr_from_index(q, cons), new_q->elem_size);
		prod = queue_next_index(new_q, prod);
		cons = queue_next_index(q, cons);
	}

	new_q->buf->producer_index = prod;
	q->buf->consumer_index = cons;

	/* update private index copies */
	if (type == QUEUE_TYPE_TO_CLIENT)
		new_q->index = new_q->buf->producer_index;
	else
		q->index = q->buf->consumer_index;

	/* exchange rxe_queue headers */
	swap(*q, *new_q);

	return 0;
}

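/* rxe_queue_resize - grow or shrink a queue, preserving its contents
 *
 * Builds a replacement queue with rxe_queue_init(), exposes it to user
 * space via do_mmap_info() when @outbuf is given, and copies the old
 * contents across under the producer/consumer locks.  Note that
 * rxe_queue_cleanup(new_q) below frees whichever buffer ended up
 * unused: the old one after a successful swap in resize_finish(), or
 * the new one on failure.
 */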
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long producer_flags;
	unsigned long consumer_flags;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, consumer_flags);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, producer_flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, producer_flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, consumer_flags);

	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}
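
/* rxe_queue_cleanup - free a queue
 *
 * A user-mapped buffer is owned by its rxe_mmap_info and is released
 * through rxe_mmap_release() once the last reference is dropped; a
 * kernel-only buffer is vfree'd directly.
 */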

void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}
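
/*
 * Typical call sequence (an illustrative sketch, not part of this
 * file; the uresp naming mirrors callers such as rxe_srq.c):
 *
 *	q = rxe_queue_init(rxe, &num_elem, elem_size,
 *			   QUEUE_TYPE_FROM_CLIENT);
 *	if (!q)
 *		return -ENOMEM;
 *	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 *			   q->buf, q->buf_size, &q->ip);
 *	if (err) {
 *		vfree(q->buf);
 *		kfree(q);
 *		return err;
 *	}
 *	...
 *	rxe_queue_cleanup(q);
 */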