0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/compiler.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/gfp.h>
0013 #include <linux/skbuff.h>
0014 #include <linux/types.h>
0015 #include <linux/pagemap.h>
0016 #include <linux/udp.h>
0017 #include <linux/sunrpc/msg_prot.h>
0018 #include <linux/sunrpc/sched.h>
0019 #include <linux/sunrpc/xdr.h>
0020 #include <linux/export.h>
0021
0022 #include "socklib.h"
0023
0024
0025
0026
/*
 * Cursor state for incrementally copying data out of a socket sk_buff.
 * Shared by the plain-copy and copy-and-checksum read actors below.
 */
struct xdr_skb_reader {
	struct sk_buff *skb;	/* source skb being consumed */
	unsigned int offset;	/* current read offset into the skb */
	size_t count;		/* bytes still available to read */
	__wsum csum;		/* running checksum of bytes read so far */
};

/*
 * A read actor copies up to @len bytes from @desc into @to, advances the
 * reader state, and returns the number of bytes actually copied (0 on error).
 */
typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
				     size_t len);
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046 static size_t
0047 xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
0048 {
0049 if (len > desc->count)
0050 len = desc->count;
0051 if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
0052 return 0;
0053 desc->count -= len;
0054 desc->offset += len;
0055 return len;
0056 }
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066 static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
0067 {
0068 unsigned int pos;
0069 __wsum csum2;
0070
0071 if (len > desc->count)
0072 len = desc->count;
0073 pos = desc->offset;
0074 csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
0075 desc->csum = csum_block_add(desc->csum, csum2, pos);
0076 desc->count -= len;
0077 desc->offset += len;
0078 return len;
0079 }
0080
0081
0082
0083
0084
0085
0086
0087
0088
/*
 * xdr_partial_copy_from_skb - copy skb data into an xdr_buf
 * @xdr: destination XDR buffer (head iovec, page array, tail iovec)
 * @base: byte offset into @xdr at which to start placing data
 * @desc: skb reader state (source data and remaining byte count)
 * @copy_actor: per-segment copy routine (plain or copy-and-checksum)
 *
 * Fills the head, then the pages, then the tail, stopping early when the
 * actor comes up short or the skb is exhausted.  Returns the number of
 * bytes copied, or -ENOMEM if a sparse page could not be allocated before
 * anything was copied.
 */
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	size_t ret;

	/* Head: copy the portion of the head iovec beyond @base. */
	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		/* Short copy or drained skb: done. */
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	/* Pages: skip them entirely when absent or when @base lies past them. */
	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		/* Translate @base (+ page_base) into a starting page pointer
		 * and an in-page offset. */
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* Sparse buffers allocate their pages lazily, on first use. */
		if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
			*ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (unlikely(*ppage == NULL)) {
				/* Only report -ENOMEM if nothing was copied. */
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			/* First page only: honour the in-page offset once. */
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	/* Tail: copy whatever remains into the tail iovec. */
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
0161
0162
0163
0164
0165
0166
0167
0168
0169
/*
 * csum_partial_copy_to_xdr - copy an skb's payload into an xdr_buf,
 * verifying the checksum in software when the hardware has not
 * @xdr: destination XDR buffer
 * @skb: source socket buffer
 *
 * Returns 0 on success, -1 if the copy came up short or the checksum
 * failed to verify.
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	/* Hardware already verified the checksum: plain copy suffices. */
	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	/* Seed the running checksum from the skb's stored csum value. */
	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	/* Include any trailing skb bytes that were not copied into @xdr. */
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	/* The whole skb must have been consumed... */
	if (desc.count)
		return -1;
	/* ...and the folded checksum must come out zero. */
	if (csum_fold(desc.csum))
		return -1;
	/* Hardware claimed CHECKSUM_COMPLETE but software disagreed:
	 * report a device checksum fault. */
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev, skb);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
0205
0206 static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
0207 size_t seek)
0208 {
0209 if (seek)
0210 iov_iter_advance(&msg->msg_iter, seek);
0211 return sock_sendmsg(sock, msg);
0212 }
0213
0214 static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
0215 struct kvec *vec, size_t seek)
0216 {
0217 iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
0218 return xprt_sendmsg(sock, msg, seek);
0219 }
0220
0221 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
0222 struct xdr_buf *xdr, size_t base)
0223 {
0224 iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
0225 xdr->page_len + xdr->page_base);
0226 return xprt_sendmsg(sock, msg, base + xdr->page_base);
0227 }
0228
0229
0230
0231
0232
0233
0234 static int xprt_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
0235 rpc_fraghdr marker, struct kvec *vec,
0236 size_t base)
0237 {
0238 struct kvec iov[2] = {
0239 [0] = {
0240 .iov_base = &marker,
0241 .iov_len = sizeof(marker)
0242 },
0243 [1] = *vec,
0244 };
0245 size_t len = iov[0].iov_len + iov[1].iov_len;
0246
0247 iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
0248 return xprt_sendmsg(sock, msg, base);
0249 }
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
/*
 * xprt_sock_sendmsg - write an xdr_buf directly to a socket
 * @sock: open socket to send on
 * @msg: socket message metadata (msg_flags are updated here)
 * @xdr: buffer containing this request
 * @base: byte offset already sent (includes the record marker bytes
 *	when @marker is non-zero)
 * @marker: stream record marker; zero means no marker is sent
 * @sent_p: on return, the number of bytes sent by this call
 *
 * Sends, in order: the record marker together with the head kvec, the
 * page data, and the tail kvec.  MSG_MORE stays set until the final
 * segment.  Returns 0 (with *@sent_p updated, possibly short) or a
 * negative errno.
 */
int xprt_sock_sendmsg(struct socket *sock, struct msghdr *msg,
		      struct xdr_buf *xdr, unsigned int base,
		      rpc_fraghdr marker, unsigned int *sent_p)
{
	unsigned int rmsize = marker ? sizeof(marker) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	*sent_p = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	msg->msg_flags |= MSG_MORE;
	/* Segment 1: the record marker (if any) plus the head kvec. */
	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;	/* last segment */
		if (rmsize)
			err = xprt_send_rm_and_kvec(sock, msg, marker,
						    &xdr->head[0], base);
		else
			err = xprt_send_kvec(sock, msg, &xdr->head[0], base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= want;
	}

	/* Segment 2: the page data. */
	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;

		remainder -= len;
		if (remainder == 0)
			msg->msg_flags &= ~MSG_MORE;	/* last segment */
		err = xprt_send_pagedata(sock, msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else {
		base -= xdr->page_len;
	}

	/* Segment 3: the tail kvec, always the final piece. */
	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg->msg_flags &= ~MSG_MORE;
	err = xprt_send_kvec(sock, msg, &xdr->tail[0], base);
out:
	/* A positive err is a (possibly short) byte count from the last
	 * send: fold it into *sent_p and report success. */
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}