0001
0002
0003
0004
0005
0006
0007 #include <linux/types.h>
0008 #include <linux/kernel.h>
0009 #include <linux/bits.h>
0010 #include <linux/bitops.h>
0011 #include <linux/bitfield.h>
0012 #include <linux/io.h>
0013 #include <linux/build_bug.h>
0014 #include <linux/device.h>
0015 #include <linux/dma-mapping.h>
0016
0017 #include "ipa.h"
0018 #include "ipa_version.h"
0019 #include "ipa_endpoint.h"
0020 #include "ipa_table.h"
0021 #include "ipa_reg.h"
0022 #include "ipa_mem.h"
0023 #include "ipa_cmd.h"
0024 #include "gsi.h"
0025 #include "gsi_trans.h"
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
/* Route table IDs [IPA_ROUTE_MODEM_MIN .. IPA_ROUTE_MODEM_MIN +
 * IPA_ROUTE_MODEM_COUNT - 1] are reserved for the modem; the AP uses
 * the remainder of the IPA_ROUTE_COUNT_MAX available entries.
 */
#define IPA_ROUTE_MODEM_MIN 0
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
		(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
0114
0115
0116
0117
0118
0119 #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
0120
0121
/* Verify at build time the size assumptions this file depends on.
 * This function generates no code; every check is a BUILD_BUG_ON().
 */
static void ipa_table_validate_build(void)
{
	/* Table entries are little-endian 64-bit DMA addresses, so a
	 * dma_addr_t value must fit in a __le64.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));

	/* A zero rule occupies exactly one table entry */
	BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));

	/* Route IDs must fit in a 32-bit value (e.g. for use in a
	 * bitmask) -- presumably; confirm against the callers.
	 */
	BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
	/* The modem must be assigned at least one route table entry */
	BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
	/* The modem's share cannot exceed the total entry count */
	BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
}
0147
0148 static bool
0149 ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
0150 {
0151 const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
0152 struct device *dev = &ipa->pdev->dev;
0153 u32 size;
0154
0155 if (route)
0156 size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
0157 else
0158 size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
0159
0160 if (!ipa_cmd_table_valid(ipa, mem, route))
0161 return false;
0162
0163
0164 if (mem->size == size)
0165 return true;
0166
0167
0168 if (ipa_table_hash_support(ipa) && !mem->size)
0169 return true;
0170
0171 dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
0172 route ? "route" : "filter", mem_id, mem->size, size);
0173
0174 return false;
0175 }
0176
0177
0178 bool ipa_table_valid(struct ipa *ipa)
0179 {
0180 bool valid;
0181
0182 valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
0183 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
0184 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
0185 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
0186
0187 if (!ipa_table_hash_support(ipa))
0188 return valid;
0189
0190 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
0191 false);
0192 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
0193 false);
0194 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
0195 true);
0196 valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
0197 true);
0198
0199 return valid;
0200 }
0201
0202 bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
0203 {
0204 struct device *dev = &ipa->pdev->dev;
0205 u32 count;
0206
0207 if (!filter_map) {
0208 dev_err(dev, "at least one filtering endpoint is required\n");
0209
0210 return false;
0211 }
0212
0213 count = hweight32(filter_map);
0214 if (count > IPA_FILTER_COUNT_MAX) {
0215 dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
0216 count, IPA_FILTER_COUNT_MAX);
0217
0218 return false;
0219 }
0220
0221 return true;
0222 }
0223
0224
0225 static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
0226 {
0227 u32 skip;
0228
0229 if (!count)
0230 return 0;
0231
0232 WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
0233
0234
0235 skip = filter_mask ? 1 : 2;
0236
0237 return ipa->table_addr + skip * sizeof(*ipa->table_virt);
0238 }
0239
/* Add a DMA command to @trans that overwrites @count entries of the
 * table in region @mem_id, starting at entry @first, with canned
 * entries that each hold the DMA address of the zero rule (see
 * ipa_table_init()).  For a filter table @first is bumped past the
 * endpoint bitmap occupying the table's first slot.
 */
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
				u16 first, u16 count, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr;
	u32 offset;
	u16 size;

	/* Nothing to do for an empty region (e.g. an absent hashed table) */
	if (!mem->size)
		return;

	/* Skip over the filter bitmap slot at the start of a filter table */
	if (filter)
		first++;

	offset = mem->offset + first * sizeof(__le64);
	size = count * sizeof(__le64);
	/* false: address of the canned zero-rule entries, past the bitmap */
	addr = ipa_table_addr(ipa, false, count);

	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}
0262
0263
0264
0265
0266
/* Reset one filter table (region @mem_id): point the rule for every
 * filtering endpoint owned by the given execution environment (modem
 * or AP) back at the zero rule.  Returns 0 on success, or -EBUSY if
 * no command transaction could be allocated.
 */
static int
ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
	u32 ep_mask = ipa->filter_map;
	u32 count = hweight32(ep_mask);
	struct gsi_trans *trans;
	enum gsi_ee_id ee_id;

	/* Allocate a command slot per filtering endpoint, even though
	 * only endpoints matching @modem add a command below.
	 */
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for %s filter reset\n",
			modem ? "modem" : "AP");
		return -EBUSY;
	}

	ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
	while (ep_mask) {
		u32 endpoint_id = __ffs(ep_mask);
		struct ipa_endpoint *endpoint;

		ep_mask ^= BIT(endpoint_id);	/* clear the bit just found */

		/* Only reset entries owned by the requested EE */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->ee_id != ee_id)
			continue;

		ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
	}

	gsi_trans_commit_wait(trans);

	return 0;
}
0301
0302
0303
0304
0305
0306 static int ipa_filter_reset(struct ipa *ipa, bool modem)
0307 {
0308 int ret;
0309
0310 ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
0311 if (ret)
0312 return ret;
0313
0314 ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
0315 if (ret)
0316 return ret;
0317
0318 ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
0319 if (ret)
0320 return ret;
0321 ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
0322
0323 return ret;
0324 }
0325
0326
0327
0328
0329
0330 static int ipa_route_reset(struct ipa *ipa, bool modem)
0331 {
0332 struct gsi_trans *trans;
0333 u16 first;
0334 u16 count;
0335
0336 trans = ipa_cmd_trans_alloc(ipa, 4);
0337 if (!trans) {
0338 dev_err(&ipa->pdev->dev,
0339 "no transaction for %s route reset\n",
0340 modem ? "modem" : "AP");
0341 return -EBUSY;
0342 }
0343
0344 if (modem) {
0345 first = IPA_ROUTE_MODEM_MIN;
0346 count = IPA_ROUTE_MODEM_COUNT;
0347 } else {
0348 first = IPA_ROUTE_AP_MIN;
0349 count = IPA_ROUTE_AP_COUNT;
0350 }
0351
0352 ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
0353 ipa_table_reset_add(trans, false, first, count,
0354 IPA_MEM_V4_ROUTE_HASHED);
0355
0356 ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
0357 ipa_table_reset_add(trans, false, first, count,
0358 IPA_MEM_V6_ROUTE_HASHED);
0359
0360 gsi_trans_commit_wait(trans);
0361
0362 return 0;
0363 }
0364
0365 void ipa_table_reset(struct ipa *ipa, bool modem)
0366 {
0367 struct device *dev = &ipa->pdev->dev;
0368 const char *ee_name;
0369 int ret;
0370
0371 ee_name = modem ? "modem" : "AP";
0372
0373
0374 ret = ipa_filter_reset(ipa, modem);
0375 if (ret)
0376 dev_err(dev, "error %d resetting filter table for %s\n",
0377 ret, ee_name);
0378
0379 ret = ipa_route_reset(ipa, modem);
0380 if (ret)
0381 dev_err(dev, "error %d resetting route table for %s\n",
0382 ret, ee_name);
0383 }
0384
/* Issue a register write command requesting a flush of all four hashed
 * tables (IPv4/IPv6 filter and route).  Returns 0 on success -- also
 * when hashed tables are not supported -- or -EBUSY if no command
 * transaction could be allocated.
 */
int ipa_table_hash_flush(struct ipa *ipa)
{
	u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
	struct gsi_trans *trans;
	u32 val;

	if (!ipa_table_hash_support(ipa))
		return 0;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
		return -EBUSY;
	}

	/* Set the flush bits for all four hashed tables in one write */
	val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
	val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;

	ipa_cmd_register_write_add(trans, offset, val, val, false);

	gsi_trans_commit_wait(trans);

	return 0;
}
0409
/* Add the commands to @trans that initialize one table (and its hashed
 * counterpart, if present): a table-init command using the canned
 * zero-rule entries built by ipa_table_init(), plus -- for filter
 * tables only -- DMA commands that zero any region space beyond the
 * initialized entries.
 */
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
			       enum ipa_cmd_opcode opcode,
			       enum ipa_mem_id mem_id,
			       enum ipa_mem_id hash_mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t hash_addr;
	dma_addr_t addr;
	u32 zero_offset;
	u16 hash_count;
	u32 zero_size;
	u16 hash_size;
	u16 count;
	u16 size;

	if (filter) {
		/* A filter table begins with the endpoint bitmap, then
		 * holds one rule per filtering endpoint -- hence the
		 * extra entry beyond the filter-map population count.
		 */
		count = 1 + hweight32(ipa->filter_map);
		/* The hashed table gets the same count, unless absent */
		hash_count = hash_mem->size ? count : 0;
	} else {
		/* Route tables are initialized over their full size */
		count = mem->size / sizeof(__le64);
		hash_count = hash_mem->size / sizeof(__le64);
	}
	size = count * sizeof(__le64);
	hash_size = hash_count * sizeof(__le64);

	/* For filter tables this address includes the bitmap slot */
	addr = ipa_table_addr(ipa, filter, count);
	hash_addr = ipa_table_addr(ipa, filter, hash_count);

	ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
			       hash_size, hash_mem->offset, hash_addr);
	if (!filter)
		return;

	/* Zero the unused space in the (non-hashed) filter table */
	zero_offset = mem->offset + size;
	zero_size = mem->size - size;
	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
				   ipa->zero_addr, true);
	if (!hash_size)
		return;

	/* Zero the unused space in the hashed filter table */
	zero_offset = hash_mem->offset + hash_size;
	zero_size = hash_mem->size - hash_size;
	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
				   ipa->zero_addr, true);
}
0468
/* Initialize all filter and route tables by issuing immediate commands.
 * Returns 0 on success, or -EBUSY if no command transaction could be
 * allocated.
 */
int ipa_table_setup(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Command budget: one table-init command for each of the four
	 * ipa_table_init_add() calls below, plus up to two zeroing DMA
	 * commands for each of the two filter-table calls -- eight
	 * commands in all (see ipa_table_init_add()).
	 */
	trans = ipa_cmd_trans_alloc(ipa, 8);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
		return -EBUSY;
	}

	ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
			   IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);

	ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
			   IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);

	ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
			   IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);

	ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
			   IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);

	gsi_trans_commit_wait(trans);

	return 0;
}
0508
0509
0510
0511
0512
0513
0514
0515
/* Zero the filter-related hash "tuple" fields in one endpoint's hash
 * configuration register -- presumably disabling hashed filter lookups
 * for that endpoint (confirm against IPA register documentation).
 */
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);

	/* Read-modify-write: only the filter hash fields are cleared */
	val = ioread32(endpoint->ipa->reg_virt + offset);

	u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
0531
0532
0533 static void ipa_filter_config(struct ipa *ipa, bool modem)
0534 {
0535 enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
0536 u32 ep_mask = ipa->filter_map;
0537
0538 if (!ipa_table_hash_support(ipa))
0539 return;
0540
0541 while (ep_mask) {
0542 u32 endpoint_id = __ffs(ep_mask);
0543 struct ipa_endpoint *endpoint;
0544
0545 ep_mask ^= BIT(endpoint_id);
0546
0547 endpoint = &ipa->endpoint[endpoint_id];
0548 if (endpoint->ee_id == ee_id)
0549 ipa_filter_tuple_zero(endpoint);
0550 }
0551 }
0552
0553 static bool ipa_route_id_modem(u32 route_id)
0554 {
0555 return route_id >= IPA_ROUTE_MODEM_MIN &&
0556 route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
0557 }
0558
0559
0560
0561
0562
0563
0564
0565
/* Zero the router-related hash "tuple" fields in the hash configuration
 * register for one route table entry -- presumably disabling hashed
 * route lookups for it (confirm against IPA register documentation).
 */
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
	u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
	u32 val;

	/* Read-modify-write: only the router hash fields are cleared */
	val = ioread32(ipa->reg_virt + offset);

	u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);

	iowrite32(val, ipa->reg_virt + offset);
}
0578
0579
0580 static void ipa_route_config(struct ipa *ipa, bool modem)
0581 {
0582 u32 route_id;
0583
0584 if (!ipa_table_hash_support(ipa))
0585 return;
0586
0587 for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
0588 if (ipa_route_id_modem(route_id) == modem)
0589 ipa_route_tuple_zero(ipa, route_id);
0590 }
0591
0592
/* Configure filter and route table hashing, zeroing the hash tuples
 * for the AP's entries first, then the modem's.
 */
void ipa_table_config(struct ipa *ipa)
{
	ipa_filter_config(ipa, false);
	ipa_filter_config(ipa, true);
	ipa_route_config(ipa, false);
	ipa_route_config(ipa, true);
}
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
/* Allocate and fill the DMA-coherent buffer used to initialize and
 * reset the filter and route tables.  Its layout is:
 *   - one 64-bit zero rule (all zeroes), at the buffer's base address;
 *   - one 64-bit filter bitmap (the endpoint filter map shifted left
 *     one position -- bit 0 apparently has a special meaning; confirm);
 *   - "count" 64-bit entries, each holding the DMA address of the zero
 *     rule, enough for the larger of the filter or route table.
 * Returns 0, or -ENOMEM if allocation fails.
 */
int ipa_table_init(struct ipa *ipa)
{
	u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
	struct device *dev = &ipa->pdev->dev;
	dma_addr_t addr;
	__le64 le_addr;
	__le64 *virt;
	size_t size;

	ipa_table_validate_build();

	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
	virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	ipa->table_virt = virt;
	ipa->table_addr = addr;

	/* First slot is the zero rule itself */
	*virt++ = 0;

	/* Next is the filter bitmap, shifted up one position */
	*virt++ = cpu_to_le64((u64)ipa->filter_map << 1);

	/* All remaining entries refer back to the zero rule at @addr */
	le_addr = cpu_to_le64(addr);
	while (count--)
		*virt++ = le_addr;

	return 0;
}
0681
0682 void ipa_table_exit(struct ipa *ipa)
0683 {
0684 u32 count = max_t(u32, 1 + IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
0685 struct device *dev = &ipa->pdev->dev;
0686 size_t size;
0687
0688 size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
0689
0690 dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
0691 ipa->table_addr = 0;
0692 ipa->table_virt = NULL;
0693 }