/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

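/* Add a tag -> hash value entry to the per-vif hash cache, unless an
 * entry with the same tag is already present. If the insertion takes
 * the cache above xenvif_hash_cache_size, the entry with the oldest
 * sequence number is evicted. Allocation failure is silently ignored;
 * the hash will simply be recomputed next time.
 */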
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
                unsigned int len, u32 val)
{
    struct xenvif_hash_cache_entry *new, *entry, *oldest;
    unsigned long flags;
    bool found;

    new = kmalloc(sizeof(*entry), GFP_ATOMIC);
    if (!new)
        return;

    memcpy(new->tag, tag, len);
    new->len = len;
    new->val = val;

    spin_lock_irqsave(&vif->hash.cache.lock, flags);

    found = false;
    oldest = NULL;
    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                lockdep_is_held(&vif->hash.cache.lock)) {
        /* Make sure we don't add duplicate entries */
        if (entry->len == len &&
            memcmp(entry->tag, tag, len) == 0)
            found = true;
        if (!oldest || entry->seq < oldest->seq)
            oldest = entry;
    }

    if (!found) {
        new->seq = atomic_inc_return(&vif->hash.cache.seq);
        list_add_rcu(&new->link, &vif->hash.cache.list);

        if (++vif->hash.cache.count > xenvif_hash_cache_size) {
            list_del_rcu(&oldest->link);
            vif->hash.cache.count--;
            kfree_rcu(oldest, rcu);
        }
    }

    spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

    if (found)
        kfree(new);
}

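/* Compute the Toeplitz hash of the tag data with the current key and,
 * if the hash cache is enabled, remember the result for later lookups.
 */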
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
               unsigned int len)
{
    u32 val;

    val = xen_netif_toeplitz_hash(vif->hash.key,
                      sizeof(vif->hash.key),
                      data, len);

    if (xenvif_hash_cache_size != 0)
        xenvif_add_hash(vif, data, len, val);

    return val;
}

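/* Remove every entry from the hash cache, e.g. after the key has been
 * replaced and the cached values have become stale.
 */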
static void xenvif_flush_hash(struct xenvif *vif)
{
    struct xenvif_hash_cache_entry *entry;
    unsigned long flags;

    if (xenvif_hash_cache_size == 0)
        return;

    spin_lock_irqsave(&vif->hash.cache.lock, flags);

    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
                lockdep_is_held(&vif->hash.cache.lock)) {
        list_del_rcu(&entry->link);
        vif->hash.cache.count--;
        kfree_rcu(entry, rcu);
    }

    spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

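/* Look up the hash of a tag, trying the cache first and falling back
 * to a fresh Toeplitz computation on a miss. Tags too large to fit in
 * a cache entry hash to zero.
 */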
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
                unsigned int len)
{
    struct xenvif_hash_cache_entry *entry;
    u32 val;
    bool found;

    if (len >= XEN_NETBK_HASH_TAG_SIZE)
        return 0;

    if (xenvif_hash_cache_size == 0)
        return xenvif_new_hash(vif, data, len);

    rcu_read_lock();

    found = false;

    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
        if (entry->len == len &&
            memcmp(entry->tag, data, len) == 0) {
            val = entry->val;
            entry->seq = atomic_inc_return(&vif->hash.cache.seq);
            found = true;
            break;
        }
    }

    rcu_read_unlock();

    if (!found)
        val = xenvif_new_hash(vif, data, len);

    return val;
}

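/* Set the software hash of an skb according to the hash types the
 * frontend has enabled. An L4 (TCP) hash is preferred when a TCP
 * header is present and the corresponding type is enabled; otherwise
 * an L3 (address-only) hash is used. If no enabled type matches, any
 * existing hash is cleared.
 */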
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
    struct flow_keys flow;
    u32 hash = 0;
    enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
    u32 flags = vif->hash.flags;
    bool has_tcp_hdr;

    /* Quick rejection test: If the network protocol doesn't
     * correspond to any enabled hash type then there's no point
     * in parsing the packet header.
     */
    switch (skb->protocol) {
    case htons(ETH_P_IP):
        if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV4))
            break;

        goto done;

    case htons(ETH_P_IPV6):
        if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV6))
            break;

        goto done;

    default:
        goto done;
    }

    memset(&flow, 0, sizeof(flow));
    if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
        goto done;

    has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
              !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

    switch (skb->protocol) {
    case htons(ETH_P_IP):
        if (has_tcp_hdr &&
            (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
            u8 data[12];

            memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
            memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
            memcpy(&data[8], &flow.ports.src, 2);
            memcpy(&data[10], &flow.ports.dst, 2);

            hash = xenvif_find_hash(vif, data, sizeof(data));
            type = PKT_HASH_TYPE_L4;
        } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
            u8 data[8];

            memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
            memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

            hash = xenvif_find_hash(vif, data, sizeof(data));
            type = PKT_HASH_TYPE_L3;
        }

        break;

    case htons(ETH_P_IPV6):
        if (has_tcp_hdr &&
            (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
            u8 data[36];

            memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
            memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
            memcpy(&data[32], &flow.ports.src, 2);
            memcpy(&data[34], &flow.ports.dst, 2);

            hash = xenvif_find_hash(vif, data, sizeof(data));
            type = PKT_HASH_TYPE_L4;
        } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
            u8 data[32];

            memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
            memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

            hash = xenvif_find_hash(vif, data, sizeof(data));
            type = PKT_HASH_TYPE_L3;
        }

        break;
    }

done:
    if (type == PKT_HASH_TYPE_NONE)
        skb_clear_hash(skb);
    else
        __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

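/* Select the hash algorithm requested by the frontend; only 'none'
 * and Toeplitz are supported.
 */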
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
    switch (alg) {
    case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
    case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
        break;

    default:
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
    }

    vif->hash.alg = alg;

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

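/* Report the hash types the backend supports. Not supported while the
 * hash algorithm is 'none'.
 */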
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
    if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
        return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

    *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
         XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
         XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
         XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

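/* Enable a set of hash types. Unknown flags are rejected, as is any
 * setting made before a hash algorithm has been selected.
 */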
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
    if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
              XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
              XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
              XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    vif->hash.flags = flags;

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

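/* Grant-copy a new hash key from the frontend, zero any remaining key
 * octets, then flush the hash cache since cached values were computed
 * with the old key.
 */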
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
    u8 *key = vif->hash.key;
    struct gnttab_copy copy_op = {
        .source.u.ref = gref,
        .source.domid = vif->domid,
        .dest.u.gmfn = virt_to_gfn(key),
        .dest.domid = DOMID_SELF,
        .dest.offset = xen_offset_in_page(key),
        .len = len,
        .flags = GNTCOPY_source_gref
    };

    if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    if (copy_op.len != 0) {
        gnttab_batch_copy(&copy_op, 1);

        if (copy_op.status != GNTST_okay)
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
    }

    /* Clear any remaining key octets */
    if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
        memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

    xenvif_flush_hash(vif);

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

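/* Set the size of the hash-to-queue mapping table and clear the
 * currently selected mapping.
 */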
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
    if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    vif->hash.size = size;
    memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
           sizeof(u32) * size);

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

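/* Update a region of the inactive hash-to-queue mapping table: seed it
 * with the contents of the active table, grant-copy the new entries
 * from the frontend (splitting the copy if the destination crosses a
 * page boundary), check that every updated entry names a valid queue,
 * and finally flip mapping_sel to make the new table active.
 */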
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                u32 off)
{
    u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
    unsigned int nr = 1;
    struct gnttab_copy copy_op[2] = {{
        .source.u.ref = gref,
        .source.domid = vif->domid,
        .dest.domid = DOMID_SELF,
        .len = len * sizeof(*mapping),
        .flags = GNTCOPY_source_gref
    }};

    if ((off + len < off) || (off + len > vif->hash.size) ||
        len > XEN_PAGE_SIZE / sizeof(*mapping))
        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
    copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
    if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
        copy_op[1] = copy_op[0];
        copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
        copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
        copy_op[1].dest.offset = 0;
        copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
        copy_op[0].len = copy_op[1].source.offset;
        nr = 2;
    }

    memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
           vif->hash.size * sizeof(*mapping));

    if (copy_op[0].len != 0) {
        gnttab_batch_copy(copy_op, nr);

        if (copy_op[0].status != GNTST_okay ||
            copy_op[nr - 1].status != GNTST_okay)
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
    }

    while (len-- != 0)
        if (mapping[off++] >= vif->num_queues)
            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

    vif->hash.mapping_sel = !vif->hash.mapping_sel;

    return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

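/* debugfs: dump the current hash configuration (algorithm, enabled
 * hash types, key and mapping table).
 */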
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
    unsigned int i;

    switch (vif->hash.alg) {
    case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
        seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
        break;

    case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
        seq_puts(m, "Hash Algorithm: NONE\n");
        fallthrough;
    default:
        return;
    }

    if (vif->hash.flags) {
        seq_puts(m, "\nHash Flags:\n");

        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
            seq_puts(m, "- IPv4\n");
        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
            seq_puts(m, "- IPv4 + TCP\n");
        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
            seq_puts(m, "- IPv6\n");
        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
            seq_puts(m, "- IPv6 + TCP\n");
    }

    seq_puts(m, "\nHash Key:\n");

    for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
        unsigned int j, n;

        n = 8;
        if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
            n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

        seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

        for (j = 0; j < n; j++, i++)
            seq_printf(m, "%02x ", vif->hash.key[i]);

        seq_puts(m, "\n");
    }

    if (vif->hash.size != 0) {
        const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

        seq_puts(m, "\nHash Mapping:\n");

        for (i = 0; i < vif->hash.size; ) {
            unsigned int j, n;

            n = 8;
            if (i + n >= vif->hash.size)
                n = vif->hash.size - i;

            seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

            for (j = 0; j < n; j++, i++)
                seq_printf(m, "%4u ", mapping[i]);

            seq_puts(m, "\n");
        }
    }
}
#endif /* CONFIG_DEBUG_FS */

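/* Initialise the hash cache lock and list; a no-op when caching is
 * disabled via xenvif_hash_cache_size.
 */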
void xenvif_init_hash(struct xenvif *vif)
{
    if (xenvif_hash_cache_size == 0)
        return;

    BUG_ON(vif->hash.cache.count);

    spin_lock_init(&vif->hash.cache.lock);
    INIT_LIST_HEAD(&vif->hash.cache.list);
}

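/* Tear down per-vif hash state by flushing any cached entries. */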
void xenvif_deinit_hash(struct xenvif *vif)
{
    xenvif_flush_hash(vif);
}