0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/module.h>
0010 #include <linux/kernel.h>
0011 #include <linux/scatterlist.h>
0012 #include <crypto/scatterwalk.h>
0013
0014 #include "omap-crypto.h"
0015
0016 static int omap_crypto_copy_sg_lists(int total, int bs,
0017 struct scatterlist **sg,
0018 struct scatterlist *new_sg, u16 flags)
0019 {
0020 int n = sg_nents(*sg);
0021 struct scatterlist *tmp;
0022
0023 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
0024 new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
0025 if (!new_sg)
0026 return -ENOMEM;
0027
0028 sg_init_table(new_sg, n);
0029 }
0030
0031 tmp = new_sg;
0032
0033 while (*sg && total) {
0034 int len = (*sg)->length;
0035
0036 if (total < len)
0037 len = total;
0038
0039 if (len > 0) {
0040 total -= len;
0041 sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
0042 if (total <= 0)
0043 sg_mark_end(tmp);
0044 tmp = sg_next(tmp);
0045 }
0046
0047 *sg = sg_next(*sg);
0048 }
0049
0050 *sg = new_sg;
0051
0052 return 0;
0053 }
0054
0055 static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
0056 struct scatterlist *new_sg, u16 flags)
0057 {
0058 void *buf;
0059 int pages;
0060 int new_len;
0061
0062 new_len = ALIGN(total, bs);
0063 pages = get_order(new_len);
0064
0065 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
0066 if (!buf) {
0067 pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
0068 __func__);
0069 return -ENOMEM;
0070 }
0071
0072 if (flags & OMAP_CRYPTO_COPY_DATA) {
0073 scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
0074 if (flags & OMAP_CRYPTO_ZERO_BUF)
0075 memset(buf + total, 0, new_len - total);
0076 }
0077
0078 if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
0079 sg_init_table(new_sg, 1);
0080
0081 sg_set_buf(new_sg, buf, new_len);
0082
0083 *sg = new_sg;
0084
0085 return 0;
0086 }
0087
0088 static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
0089 u16 flags)
0090 {
0091 int len = 0;
0092 int num_sg = 0;
0093
0094 if (!IS_ALIGNED(total, bs))
0095 return OMAP_CRYPTO_NOT_ALIGNED;
0096
0097 while (sg) {
0098 num_sg++;
0099
0100 if (!IS_ALIGNED(sg->offset, 4))
0101 return OMAP_CRYPTO_NOT_ALIGNED;
0102 if (!IS_ALIGNED(sg->length, bs))
0103 return OMAP_CRYPTO_NOT_ALIGNED;
0104 #ifdef CONFIG_ZONE_DMA
0105 if (page_zonenum(sg_page(sg)) != ZONE_DMA)
0106 return OMAP_CRYPTO_NOT_ALIGNED;
0107 #endif
0108
0109 len += sg->length;
0110 sg = sg_next(sg);
0111
0112 if (len >= total)
0113 break;
0114 }
0115
0116 if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
0117 return OMAP_CRYPTO_NOT_ALIGNED;
0118
0119 if (len != total)
0120 return OMAP_CRYPTO_BAD_DATA_LENGTH;
0121
0122 return 0;
0123 }
0124
/*
 * omap_crypto_align_sg - make a scatterlist DMA-usable, copying if needed
 * @sg:		in/out: scatterlist to check; replaced on copy
 * @total:	number of bytes of the transfer
 * @bs:		block size the hardware requires
 * @new_sg:	caller-provided entry used when a replacement list is built
 * @flags:	OMAP_CRYPTO_* behavior flags
 * @flags_shift: bit offset for this context's copy flags inside @dd_flags
 * @dd_flags:	in/out: driver state; records what was copied so that
 *		omap_crypto_cleanup() can undo it later
 *
 * Returns 0 on success or -ENOMEM if a copy buffer could not be allocated.
 */
int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
			 struct scatterlist *new_sg, u16 flags,
			 u8 flags_shift, unsigned long *dd_flags)
{
	int ret;

	/* Clear stale copy-state bits for this context before re-checking */
	*dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);

	/* FORCE_COPY skips the check and always takes the copy path */
	if (flags & OMAP_CRYPTO_FORCE_COPY)
		ret = OMAP_CRYPTO_NOT_ALIGNED;
	else
		ret = omap_crypto_check_sg(*sg, total, bs, flags);

	if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
		/* Data itself copied into a fresh aligned buffer */
		ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		*dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
	} else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
		/* Only the list was rebuilt; pages are shared, not copied */
		ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;
		/* Single-entry case reuses @new_sg, so nothing to kfree later */
		if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
			*dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
	} else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
		/* List is fine but caller wants it repackaged as one entry */
		sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
0155 EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
0156
0157 static void omap_crypto_copy_data(struct scatterlist *src,
0158 struct scatterlist *dst,
0159 int offset, int len)
0160 {
0161 int amt;
0162 void *srcb, *dstb;
0163 int srco = 0, dsto = offset;
0164
0165 while (src && dst && len) {
0166 if (srco >= src->length) {
0167 srco -= src->length;
0168 src = sg_next(src);
0169 continue;
0170 }
0171
0172 if (dsto >= dst->length) {
0173 dsto -= dst->length;
0174 dst = sg_next(dst);
0175 continue;
0176 }
0177
0178 amt = min(src->length - srco, dst->length - dsto);
0179 amt = min(len, amt);
0180
0181 srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
0182 dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
0183
0184 memcpy(dstb, srcb, amt);
0185
0186 flush_dcache_page(sg_page(dst));
0187
0188 kunmap_atomic(srcb);
0189 kunmap_atomic(dstb);
0190
0191 srco += amt;
0192 dsto += amt;
0193 len -= amt;
0194 }
0195 }
0196
/*
 * omap_crypto_cleanup - undo what omap_crypto_align_sg() set up
 * @sg:		the (possibly replacement) scatterlist used for the transfer
 * @orig:	the caller's original list, or NULL if results need not be
 *		copied back
 * @offset:	byte offset into @orig where copied-back data starts
 * @len:	number of bytes that were transferred
 * @flags_shift: bit offset of this context's copy flags within @flags
 * @flags:	driver flags word previously filled by omap_crypto_align_sg()
 *
 * Copies result data back to @orig when the data had been bounced through
 * a temporary buffer, then releases whichever resource was allocated:
 * the page buffer (DATA_COPIED) or the kmalloc'ed list (SG_COPIED).
 */
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
			 int offset, int len, u8 flags_shift,
			 unsigned long flags)
{
	void *buf;
	int pages;

	/* Isolate this context's copy-state bits */
	flags >>= flags_shift;
	flags &= OMAP_CRYPTO_COPY_MASK;

	/* Nothing was copied, nothing to free or restore */
	if (!flags)
		return;

	buf = sg_virt(sg);
	/*
	 * NOTE(review): assumes @len equals the ALIGN(total, bs) size the
	 * buffer was allocated with in omap_crypto_copy_sgs(); a smaller
	 * len would compute a smaller order and leak pages — verify callers.
	 */
	pages = get_order(len);

	/* Bounce buffer was used: propagate results back to the caller */
	if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
		omap_crypto_copy_data(sg, orig, offset, len);

	if (flags & OMAP_CRYPTO_DATA_COPIED)
		free_pages((unsigned long)buf, pages);
	else if (flags & OMAP_CRYPTO_SG_COPIED)
		kfree(sg);
}
EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
0221 EXPORT_SYMBOL_GPL(omap_crypto_cleanup);
0222
0223 MODULE_DESCRIPTION("OMAP crypto support library.");
0224 MODULE_LICENSE("GPL v2");
0225 MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");