// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS	160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
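
/**
 * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
 * @adapter: board private structure
 * @xs: xfrm info to be sent to the PF
 *
 * Returns: positive offload handle from the PF, or negative error code
 **/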
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
				   struct xfrm_state *xs)
{
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	struct sa_mbx_msg *sam;
	int ret;

	/* send the important bits to the PF */
	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	sam->dir = xs->xso.dir;
	sam->spi = xs->id.spi;
	sam->proto = xs->id.proto;
	sam->family = xs->props.family;

	if (xs->props.family == AF_INET6)
		memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
	else
		memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

	msgbuf[0] = IXGBE_VF_IPSEC_ADD;

	spin_lock_bh(&adapter->mbx_lock);

	ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
	if (ret)
		goto out;

	ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (ret)
		goto out;

	/* the PF's reply carries the offload handle (or a negative
	 * error) in the second word
	 */
	ret = (int)msgbuf[1];
	if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
		ret = -1;

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return ret;
}
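
/**
 * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
 * @adapter: board private structure
 * @pfsa: sa index returned from PF when created
 *
 * Returns: 0 on success, or negative error code
 **/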
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msgbuf[2];
	int err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_IPSEC_DEL;
	msgbuf[1] = (u32)pfsa;

	spin_lock_bh(&adapter->mbx_lock);

	err = ixgbevf_write_mbx(hw, msgbuf, 2);
	if (err)
		goto out;

	err = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (err)
		goto out;

out:
	spin_unlock_bh(&adapter->mbx_lock);
	return err;
}
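
/**
 * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Push the SW SA tables back out to the PF after a reset has wiped
 * the device's IPsec state.
 **/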
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct net_device *netdev = adapter->netdev;
	int i;

	if (!(adapter->netdev->features & NETIF_F_HW_ESP))
		return;

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];
		int ret;

		if (r->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
			if (ret < 0)
				netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
					   i, ret);
		}

		if (t->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
			if (ret < 0)
				netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
					   i, ret);
		}
	}
}
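
/**
 * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/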
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
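
/**
 * ixgbevf_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information
 **/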
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
					       __be32 *daddr, u8 proto,
					       __be32 spi, bool ip4)
{
	struct xfrm_state *ret = NULL;
	struct rx_sa *rsa;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
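
/**
 * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * hardware only supports the one algorithm.
 **/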
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
					  u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.real_dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a big endian array of bytes, so
	 * we don't need to do any byte swapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len > IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = 0;
	} else {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
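
/**
 * ixgbevf_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/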
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;
	int ret;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		netdev_err(dev, "Unsupported mode for ipsec offload\n");
		return -EINVAL;
	}

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		rsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		tsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	return 0;
}
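
/**
 * ixgbevf_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/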
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;

		if (!ipsec->rx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
		hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
		memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}
}
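
/**
 * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/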
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
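
/* For reference, a minimal sketch of installing an SA that satisfies
 * this driver's constraints from userspace with iproute2.  The
 * addresses, SPI, and key below are illustrative placeholders, not
 * values from this driver.  The offload requires transport mode and
 * rfc4106(gcm(aes)) with a 160-bit key blob (128-bit key plus 32-bit
 * salt) and a 128-bit ICV:
 *
 *   ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
 *       proto esp spi 0x101 reqid 1 mode transport \
 *       aead 'rfc4106(gcm(aes))' 0x3ed0af408cf5dcbf5d5d9a5fa806b211b2d45c32 128 \
 *       offload dev eth0 dir in
 */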
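
/**
 * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 *
 * Returns: 1 if the packet's IPsec context was set up, else 0
 **/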
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;
	u16 sa_idx;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, sa_idx);
		return 0;
	}

	itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 */
		if (!skb_is_gso(first->skb)) {
			/* we can only get the trailer length from the
			 * packet itself: the padlen byte sits just ahead
			 * of the next-header byte and the ICV at the
			 * very end of the packet
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
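
/**
 * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/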
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP header just past the Ethernet header, using the
	 * descriptor's packet-type bits to pick IPv4 vs IPv6, then
	 * locate the security header that follows it.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
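
/**
 * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
 * @adapter: board private structure
 **/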
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec;
	size_t size;

	/* IPsec offload needs mailbox API 1.4 or later */
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;

	adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;

#define IXGBEVF_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}
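
/**
 * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/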
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}