#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
#include <linux/if_bridge.h>

/* 160 bits: a 128-bit AES-GCM key followed by a 32-bit salt */
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";

static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
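
/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/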
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	/* load the key and salt into the staging registers */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	/* write the index to latch the staged data into the SA table slot */
	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
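
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/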
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	/* select the table and index, then trigger the write */
	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
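
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: Rx decrypt control bits
 * @ip_idx: index into IP table for the related IP address
 **/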
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}
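
/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/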
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the IP address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
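
/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/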
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - the IP table is smaller, so split the loops */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}
}
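
/**
 * ixgbe_ipsec_stop_data - stop the Tx and Rx data paths of the security block
 * @adapter: board private structure
 **/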
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are already ready there are no packets
	 * that we need to flush, so the loopback dance below is
	 * unnecessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the Tx FIFO still has data but no link, the Tx sec
	 * block can't drain.  Force the MAC into loopback so the
	 * FIFO can empty, then undo it below.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);

	/* undo the loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}
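
/**
 * ixgbe_ipsec_stop_engine - disable the IPsec engine
 * @adapter: board private structure
 **/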
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}
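
/**
 * ixgbe_ipsec_start_engine - enable the IPsec engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/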
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after the buffer contains
	 * at least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}
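
/**
 * ixgbe_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been cleared
 * by a chip reset.  While we're here, remove any stale VF-owned SAs;
 * the VFs will have to register them again.
 **/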
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];

		if (r->used) {
			if (r->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(r->xs);
			else
				ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
						      r->key, r->salt,
						      r->mode, r->iptbl_ind);
		}

		if (t->used) {
			if (t->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(t->xs);
			else
				ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
		}
	}

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}
}
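
/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table,
 * or -ENOSPC if no slot is available
 **/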
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
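
/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information, with a
 * reference held on it, or NULL if no match is found
 **/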
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (rsa->mode & IXGBE_RXTXMOD_VF)
			continue;
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
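
/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt into our own data tables.
 * The hardware only supports the one algorithm.
 **/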
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.real_dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 bytes of key and 4 bytes of salt
	 */
	if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
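
/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 *
 * Returns 1 if the IP address clashes with a management filter, else 0
 **/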
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT 16
#define MFVAL_IPV6_FILTER_SHIFT 24
#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL 0x5060
#define BMCIP_V4 0x2
#define BMCIP_V6 0x3
#define BMCIP_MASK 0x3

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == (__force u32)xs->id.daddr.a4)
					return 1;
			}
		}

		/* is there a BMC IPv4 address to check? */
		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == (__force u32)xs->id.daddr.a4)
				return 1;
		}

	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != (__force u32)xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)	/* all 4 words matched */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != (__force u32)xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)	/* all 4 words matched */
				return 1;
		}
	}

	return 0;
}
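
/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/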
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		netdev_err(dev, "Unsupported mode for ipsec offload\n");
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not share the SA index between the SPI table
		 * and the IP table, so search the IP table for either a
		 * matching address entry (to share, bumping its refcount)
		 * or the first free slot, and stop early once we've checked
		 * all the in-use entries.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;	/* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		if (adapter->num_vfs &&
		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
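
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/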
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}
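
/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/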
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* offload with IPv4 options is not supported */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* offload with IPv6 extension headers is not supported */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
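
/**
 * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
 * @adapter: board private structure
 * @vf: VF id to be removed
 **/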
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	int i;

	if (!ipsec)
		return;

	/* search rx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
		if (!ipsec->rx_tbl[i].used)
			continue;
		if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->rx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
	}

	/* search tx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
		if (!ipsec->tx_tbl[i].used)
			continue;
		if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->tx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
	}
}
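
/**
 * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
 * @adapter: board private structure
 * @msgbuf: the message buffer from the VF
 * @vf: the VF index
 *
 * Make up a new xs and algorithm info from the data sent by the VF.
 * We only have the rudimentary info here (dest address, SPI, protocol,
 * and key), so we sketch in just enough state to set up the HW offload.
 * Only Rx SAs are supported from the VF.  Returns 0 on success.
 **/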
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_algo_desc *algo;
	struct sa_mbx_msg *sam;
	struct xfrm_state *xs;
	size_t aead_len;
	u16 sa_idx;
	u32 pfsa;
	int err;

	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	if (!adapter->vfinfo[vf].trusted ||
	    !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
		e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
		err = -EACCES;
		goto err_out;
	}

	/* Tx IPsec offload from the VF is not supported,
	 * so only Rx SA requests are accepted here.
	 */
	if (sam->dir != XFRM_DEV_OFFLOAD_IN) {
		err = -EOPNOTSUPP;
		goto err_out;
	}

	xs = kzalloc(sizeof(*xs), GFP_KERNEL);
	if (unlikely(!xs)) {
		err = -ENOMEM;
		goto err_out;
	}

	xs->xso.dir = sam->dir;
	xs->id.spi = sam->spi;
	xs->id.proto = sam->proto;
	xs->props.family = sam->family;
	if (xs->props.family == AF_INET6)
		memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
	else
		memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
	xs->xso.dev = adapter->netdev;
	/* ixgbe_ipsec_add_sa() below reads xso.real_dev, which the xfrm
	 * stack normally fills in; since this state is built by the driver
	 * rather than the stack, set it here as well so the PF path
	 * doesn't chase a NULL pointer.
	 */
	xs->xso.real_dev = adapter->netdev;

	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
	if (unlikely(!algo)) {
		err = -ENOENT;
		goto err_xs;
	}

	aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
	xs->aead = kzalloc(aead_len, GFP_KERNEL);
	if (unlikely(!xs->aead)) {
		err = -ENOMEM;
		goto err_xs;
	}

	xs->props.ealgo = algo->desc.sadb_alg_id;
	xs->geniv = algo->uinfo.aead.geniv;
	xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
	xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
	memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));

	/* set up the HW offload */
	err = ixgbe_ipsec_add_sa(xs);
	if (err)
		goto err_aead;

	pfsa = xs->xso.offload_handle;
	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		ipsec->rx_tbl[sa_idx].vf = vf;
		ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	} else {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		ipsec->tx_tbl[sa_idx].vf = vf;
		ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	}

	msgbuf[1] = xs->xso.offload_handle;

	return 0;

err_aead:
	kfree_sensitive(xs->aead);
err_xs:
	kfree_sensitive(xs);
err_out:
	msgbuf[1] = err;
	return err;
}
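
/**
 * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
 * @adapter: board private structure
 * @msgbuf: the message buffer from the VF
 * @vf: the VF index
 *
 * Given the offload_handle sent by the VF, look up the related SA table
 * entry and remove it, after checking that the request really came from
 * the owning VF.  Returns 0 on success.
 **/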
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	u32 pfsa = msgbuf[1];
	u16 sa_idx;

	if (!adapter->vfinfo[vf].trusted) {
		e_err(drv, "vf %d attempted to delete an SA\n", vf);
		return -EPERM;
	}

	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		struct rx_sa *rsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used)
			return 0;

		if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
		    rsa->vf != vf) {
			e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->rx_tbl[sa_idx].xs;
	} else {
		struct tx_sa *tsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		tsa = &ipsec->tx_tbl[sa_idx];

		if (!tsa->used)
			return 0;

		if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
		    tsa->vf != vf) {
			e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->tx_tbl[sa_idx].xs;
	}

	ixgbe_ipsec_del_sa(xs);

	/* the state was allocated by ixgbe_ipsec_vf_add_sa(), so free it here */
	kfree_sensitive(xs);

	return 0;
}
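
/**
 * ixgbe_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 *
 * Returns 1 if the Tx offload context was set up, else 0.
 **/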
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* For the non-GSO case we need to tell the hardware the
		 * actual ESP trailer length: the authentication data plus
		 * the 2 bytes for padlen/next-proto plus the pad bytes
		 * themselves.  The GSO path figures this out elsewhere.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The padlen value sits in the byte just before the
			 * next-proto byte and the ICV at the end of the skb.
			 * authlen is a compile-time constant here since this
			 * device only supports one ICV size; the generic way
			 * would be crypto_aead_authsize().
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
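
/**
 * ixgbe_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/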
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data, using the packet
	 * type bits that the hardware parsed into the descriptor.  We
	 * can assume no VLAN header is in the way, since the hw wouldn't
	 * recognize the IPsec packet in that case.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
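
/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPsec operation
 * @adapter: board private structure
 **/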
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * don't enable xfrm offload.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}
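
/**
 * ixgbe_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/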
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}