/*
 * Support for channel-program indirect data address lists (IDALs) on s390.
 * An IDAL lets a single CCW describe data that crosses IDA block boundaries
 * or that lies above the 2 GB limit of the 31-bit CCW data address field.
 */
#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <linux/uaccess.h>

#define IDA_SIZE_LOG 12		/* 11 for 2k, 12 for 4k blocks */
#define IDA_BLOCK_SIZE (1L << IDA_SIZE_LOG)

/*
 * Test if an address/length pair needs an idal list.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) + length - 1) >> 31) != 0;
}

/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
		(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
}

/*
 * Create the list of idal words for an address/length pair.
 */
static inline unsigned long *idal_create_words(unsigned long *idaws,
					       void *vaddr, unsigned int length)
{
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;		/* first IDAW keeps the byte offset */
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;	/* following IDAWs are block aligned */
	}
	return idaws;
}
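
/*
 * Worked example (illustrative): for a buffer whose physical address is
 * 0xf800 and whose length is 0x1800 bytes, the data touches two 4 KB IDA
 * blocks, so idal_nr_words() returns 2 and idal_create_words() fills in:
 *
 *	idaws[0] = 0xf800	(start address, keeps the byte offset)
 *	idaws[1] = 0x10000	(next IDA block boundary)
 *
 * The channel subsystem then addresses the data through these words rather
 * than through the 31-bit ccw->cda field itself, which is what allows data
 * above 2 GB to be used in a channel program.
 */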

/*
 * Sets the address of the data in CCW.
 * If necessary it allocates an IDAL and sets the appropriate flags.
 */
static inline int
set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
	unsigned int nridaws;
	unsigned long *idal;

	if (ccw->flags & CCW_FLAG_IDA)
		return -EINVAL;
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA);
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		vaddr = idal;
	}
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}

/*
 * Releases any allocated IDAL related to the CCW.
 */
static inline void
clear_normalized_cda(struct ccw1 *ccw)
{
	if (ccw->flags & CCW_FLAG_IDA) {
		kfree((void *)(unsigned long) ccw->cda);
		ccw->flags &= ~CCW_FLAG_IDA;
	}
	ccw->cda = 0;
}
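
/*
 * Usage sketch (illustrative only, not taken from a real driver): a caller
 * that has filled in cmd_code, flags and count can let set_normalized_cda()
 * decide whether an IDAL is required for the data buffer.  The command code
 * and the way the I/O is actually started (e.g. ccw_device_start()) are
 * device specific and omitted here.
 *
 *	struct ccw1 ccw;
 *	int rc;
 *
 *	ccw.cmd_code = ...;			(device specific)
 *	ccw.flags    = CCW_FLAG_SLI;
 *	ccw.count    = size;
 *	rc = set_normalized_cda(&ccw, buffer);
 *	if (rc)
 *		return rc;
 *	... start the channel program and wait for completion ...
 *	clear_normalized_cda(&ccw);
 */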

/*
 * Idal buffer extension
 */
struct idal_buffer {
	size_t size;		/* total usable size in bytes */
	size_t page_order;	/* allocation order of each chunk */
	void *data[];		/* one pointer per IDA_BLOCK_SIZE piece */
};

/*
 * Allocate an idal buffer
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
	struct idal_buffer *ib;
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
	ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
	if (ib == NULL)
		return ERR_PTR(-ENOMEM);
	ib->size = size;
	ib->page_order = page_order;
	for (i = 0; i < nr_ptrs; i++) {
		if ((i & (nr_chunks - 1)) != 0) {
			ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
			continue;
		}
		ib->data[i] = (void *)
			__get_free_pages(GFP_KERNEL, page_order);
		if (ib->data[i] != NULL)
			continue;
		/* Allocation failed, undo the chunks allocated so far. */
		while (i >= nr_chunks) {
			i -= nr_chunks;
			free_pages((unsigned long) ib->data[i],
				   ib->page_order);
		}
		kfree(ib);
		return ERR_PTR(-ENOMEM);
	}
	return ib;
}

/*
 * Free an idal buffer.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
	for (i = 0; i < nr_ptrs; i += nr_chunks)
		free_pages((unsigned long) ib->data[i], ib->page_order);
	kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
	return ib->size > (4096ul << ib->page_order) ||
		idal_is_needed(ib->data[0], ib->size);
}

/*
 * Set channel data address to idal buffer.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
	if (__idal_buffer_is_needed(ib)) {
		/* Setup idals */
		ccw->cda = (u32)(addr_t) ib->data;
		ccw->flags |= CCW_FLAG_IDA;
	} else
		/* No idals needed - use direct addressing. */
		ccw->cda = (u32)(addr_t) ib->data[0];
	ccw->count = ib->size;
}

/*
 * Copy count bytes from an idal buffer to user memory.
 * Returns the number of bytes that could not be copied.
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		to = (void __user *) to + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_to_user(to, ib->data[i], count);
}

/*
 * Copy count bytes from user memory to an idal buffer.
 * Returns the number of bytes that could not be copied.
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		from = (void __user *) from + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_from_user(ib->data[i], from, count);
}
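
/*
 * Usage sketch (illustrative only; device specifics and error handling are
 * mostly omitted): a driver that needs a large, possibly high buffer for a
 * read-style channel command could combine the helpers above as follows.
 *
 *	struct idal_buffer *ib;
 *	struct ccw1 ccw;
 *	int rc = 0;
 *
 *	ib = idal_buffer_alloc(size, 0);
 *	if (IS_ERR(ib))
 *		return -ENOMEM;
 *	ccw.cmd_code = ...;			(device specific)
 *	ccw.flags    = 0;
 *	idal_buffer_set_cda(ib, &ccw);		(also sets ccw.count)
 *	... start the channel program and wait for completion ...
 *	if (idal_buffer_to_user(ib, ubuf, size))
 *		rc = -EFAULT;
 *	idal_buffer_free(ib);
 *	return rc;
 */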

#endif