#include <linux/types.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>

#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "gsi_trans.h"

/* "Canary" value written into the space just before a memory region,
 * used to detect overwrites of the preceding region.
 */
#define IPA_MEM_CANARY_VAL	cpu_to_le32(0xdeadbeef)

/* SMEM host ID representing the modem */
#define QCOM_SMEM_HOST_MODEM	1

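/* Return the memory region descriptor with the given ID, or NULL if none */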
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;
}

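/* Add a DMA immediate command to a transaction that zero-fills a region */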
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
	dma_addr_t addr = ipa->zero_addr;

	if (!mem->size)
		return;

	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
}

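/**
 * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
 * @ipa:	IPA pointer
 *
 * Set up the shared memory regions in IPA local memory.  This involves
 * zero-filling memory regions, and in the case of header memory, telling
 * the IPA where it's located.  There is no need for a matching teardown
 * function.
 *
 * Return:	0 if successful, or a negative error code
 */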
int ipa_mem_setup(struct ipa *ipa)
{
	dma_addr_t addr = ipa->zero_addr;
	const struct ipa_mem *mem;
	struct gsi_trans *trans;
	u32 offset;
	u16 size;
	u32 val;

	/* Get a transaction to define the header memory region and to
	 * zero the processing context and modem memory regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 4);
	if (!trans) {
		dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
		return -EBUSY;
	}

	/* Initialize IPA-local header memory.  The AP header region, if
	 * present, is contiguous with and follows the modem header region,
	 * and they are initialized together.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	/* Tell the hardware where the processing context area is located */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
	offset = ipa->mem_offset + mem->offset;
	val = proc_cntxt_base_addr_encoded(ipa->version, offset);
	iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);

	return 0;
}

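/* Is the given memory region ID valid for the current IPA version? */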
static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	enum ipa_version version = ipa->version;

	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_AP_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
	case IPA_MEM_UC_EVENT_RING:
	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_QUOTA_AP:
	case IPA_MEM_END_MARKER:	/* pseudo region */
		break;

	case IPA_MEM_STATS_TETHERING:
	case IPA_MEM_STATS_DROP:
		if (version < IPA_VERSION_4_0)
			return false;
		break;

	case IPA_MEM_STATS_V4_FILTER:
	case IPA_MEM_STATS_V6_FILTER:
	case IPA_MEM_STATS_V4_ROUTE:
	case IPA_MEM_STATS_V6_ROUTE:
		if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
			return false;
		break;

	case IPA_MEM_NAT_TABLE:
	case IPA_MEM_STATS_FILTER_ROUTE:
		if (version < IPA_VERSION_4_5)
			return false;
		break;

	default:
		return false;
	}

	return true;
}

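/* Must the given memory region ID be defined for the current IPA version? */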
static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	switch (mem_id) {
	case IPA_MEM_UC_SHARED:
	case IPA_MEM_UC_INFO:
	case IPA_MEM_V4_FILTER_HASHED:
	case IPA_MEM_V4_FILTER:
	case IPA_MEM_V6_FILTER_HASHED:
	case IPA_MEM_V6_FILTER:
	case IPA_MEM_V4_ROUTE_HASHED:
	case IPA_MEM_V4_ROUTE:
	case IPA_MEM_V6_ROUTE_HASHED:
	case IPA_MEM_V6_ROUTE:
	case IPA_MEM_MODEM_HEADER:
	case IPA_MEM_MODEM_PROC_CTX:
	case IPA_MEM_AP_PROC_CTX:
	case IPA_MEM_MODEM:
		return true;

	case IPA_MEM_PDN_CONFIG:
	case IPA_MEM_STATS_QUOTA_MODEM:
	case IPA_MEM_STATS_TETHERING:
		return ipa->version >= IPA_VERSION_4_0;

	default:
		return false;
	}
}

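/* Validate one defined memory region descriptor (ID, size, offset, canaries) */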
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id = mem->id;
	u16 size_multiple;

	/* Make sure the memory region is valid for this version of IPA */
	if (!ipa_mem_id_valid(ipa, mem_id)) {
		dev_err(dev, "region id %u not valid\n", mem_id);
		return false;
	}

	if (!mem->size && !mem->canary_count) {
		dev_err(dev, "empty memory region %u\n", mem_id);
		return false;
	}

	/* Other than modem memory, sizes must be a multiple of 8 bytes */
	size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
	if (mem->size % size_multiple)
		dev_err(dev, "region %u size not a multiple of %u bytes\n",
			mem_id, size_multiple);
	else if (mem->offset % 8)
		dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
	else if (mem->offset < mem->canary_count * sizeof(__le32))
		dev_err(dev, "region %u offset too small for %hu canaries\n",
			mem_id, mem->canary_count);
	else if (mem_id == IPA_MEM_END_MARKER && mem->size)
		dev_err(dev, "non-zero end marker region size\n");
	else
		return true;

	return false;
}

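/* Verify each defined memory region is valid, and report any missing
 * regions that are required for this version of IPA.
 */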
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
	struct device *dev = &ipa->pdev->dev;
	enum ipa_mem_id mem_id;
	u32 i;

	if (mem_data->local_count > IPA_MEM_COUNT) {
		dev_err(dev, "too many memory regions (%u > %u)\n",
			mem_data->local_count, IPA_MEM_COUNT);
		return false;
	}

	for (i = 0; i < mem_data->local_count; i++) {
		const struct ipa_mem *mem = &mem_data->local[i];

		if (__test_and_set_bit(mem->id, regions)) {
			dev_err(dev, "duplicate memory region %u\n", mem->id);
			return false;
		}

		/* Validate the region's ID, size, offset, and canary count */
		if (!ipa_mem_valid_one(ipa, mem))
			return false;
	}

	/* Now see if any required regions are not defined */
	for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
		if (ipa_mem_id_required(ipa, mem_id))
			dev_err(dev, "required memory region %u missing\n",
				mem_id);
	}

	return true;
}

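/* Do all memory regions fit within the IPA local memory? */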
static bool ipa_mem_size_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 limit = ipa->mem_size;
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->offset + mem->size <= limit)
			continue;

		dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
			mem->id, limit);

		return false;
	}

	return true;
}

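/**
 * ipa_mem_config() - Configure IPA shared memory
 * @ipa:	IPA pointer
 *
 * Return:	0 if successful, or a negative error code
 */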
int ipa_mem_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	dma_addr_t addr;
	u32 mem_size;
	void *virt;
	u32 val;
	u32 i;

	/* Check the advertised location and size of the shared memory area */
	val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);

	/* The fields in the register are in 8 byte units */
	ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);

	mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);

	/* If the sizes don't match, issue a warning */
	if (ipa->mem_offset + mem_size < ipa->mem_size) {
		dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
			 mem_size);
		ipa->mem_size = mem_size;
	} else if (ipa->mem_offset + mem_size > ipa->mem_size) {
		dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
			mem_size);
	}

	/* Make sure all defined memory regions fit within the limit */
	if (!ipa_mem_size_valid(ipa))
		return -EINVAL;

	/* Prealloc DMA memory for zeroing regions */
	virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	ipa->zero_addr = addr;
	ipa->zero_virt = virt;
	ipa->zero_size = IPA_MEM_MAX;

	/* For each defined region, write "canary" values in the
	 * space prior to the region's base address if indicated.
	 */
	for (i = 0; i < ipa->mem_count; i++) {
		u16 canary_count = ipa->mem[i].canary_count;
		__le32 *canary;

		if (!canary_count)
			continue;

		/* Write canary values in the space before the region */
		canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
		do
			*--canary = IPA_MEM_CANARY_VAL;
		while (--canary_count);
	}

	/* Make sure filter and route table memory regions are valid */
	if (!ipa_table_valid(ipa))
		goto err_dma_free;

	/* Validate memory-related properties relevant to immediate commands */
	if (!ipa_cmd_data_valid(ipa))
		goto err_dma_free;

	/* Verify the microcontroller ring alignment (if defined) */
	mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
	if (mem && mem->offset % 1024) {
		dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
		goto err_dma_free;
	}

	return 0;

err_dma_free:
	dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);

	return -EINVAL;
}

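/**
 * ipa_mem_deconfig() - Inverse of ipa_mem_config()
 * @ipa:	IPA pointer
 */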
void ipa_mem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;

	dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
	ipa->zero_size = 0;
	ipa->zero_virt = NULL;
	ipa->zero_addr = 0;
}

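/**
 * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
 * @ipa:	IPA pointer
 *
 * Zeroes all of the memory regions used by the modem, so that no stale
 * modem state is left behind in IPA local memory.
 *
 * Return:	0 if successful, or a negative error code
 */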
int ipa_mem_zero_modem(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* Get a transaction to zero the modem memory, modem header,
	 * and modem processing context regions.
	 */
	trans = ipa_cmd_trans_alloc(ipa, 3);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to zero modem memory\n");
		return -EBUSY;
	}

	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
	ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);

	gsi_trans_commit_wait(trans);

	return 0;
}

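/**
 * ipa_imem_init() - Initialize IMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @addr:	Physical address of the IPA region within IMEM
 * @size:	Size (bytes) of the IPA region within IMEM
 *
 * If the IPA is given a region of IMEM to use, that region must be
 * mapped into the IPA's IOMMU domain so the hardware can access it.
 * A zero size means no IMEM region is used, and nothing is done.
 *
 * Return:	0 if successful, or a negative error code
 */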
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	int ret;

	if (!size)
		return 0;	/* IMEM memory not used */

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for IMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to page boundaries */
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->imem_iova = iova;
	ipa->imem_size = size;

	return 0;
}

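/* Inverse of ipa_imem_init() */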
static void ipa_imem_exit(struct ipa *ipa)
{
	struct iommu_domain *domain;
	struct device *dev;

	if (!ipa->imem_size)
		return;

	dev = &ipa->pdev->dev;
	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
		if (size != ipa->imem_size)
			dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
				 size, ipa->imem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
	}

	ipa->imem_size = 0;
	ipa->imem_iova = 0;
}

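/**
 * ipa_smem_init() - Initialize SMEM memory used by the IPA
 * @ipa:	IPA pointer
 * @item:	Item ID of SMEM memory
 * @size:	Size (bytes) of SMEM memory region
 *
 * SMEM is a managed block of shared DRAM, from which numbered "items"
 * can be allocated.  One item is designated for use by the IPA; it is
 * allocated here (with the modem as its host) and mapped into the IPA's
 * IOMMU domain so the hardware can access it.
 *
 * Return:	0 if successful, or a negative error code
 */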
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;
	unsigned long iova;
	phys_addr_t phys;
	phys_addr_t addr;
	size_t actual;
	void *virt;
	int ret;

	if (!size)
		return 0;	/* SMEM memory not used */

	/* An SMEM allocation is persistent until the AP reboots; there is
	 * no way to free an allocated SMEM region.  Allocation only reserves
	 * the space; a subsequent "get" supplies a pointer to it.  The item
	 * might already have been allocated, in which case we use it as
	 * long as its size is what we expect.
	 */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
			ret, size, item);
		return ret;
	}

	/* Now get the address of the SMEM memory region */
	virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
	if (IS_ERR(virt)) {
		ret = PTR_ERR(virt);
		dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
		return ret;
	}

	/* If the item was already allocated, verify its size */
	if (ret && actual != size) {
		dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
			item, actual, size);
		return -EINVAL;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (!domain) {
		dev_err(dev, "no IOMMU domain found for SMEM\n");
		return -EINVAL;
	}

	/* Align the address down and the size up to a page boundary */
	addr = qcom_smem_virt_to_phys(virt);
	phys = addr & PAGE_MASK;
	size = PAGE_ALIGN(size + addr - phys);
	iova = phys;	/* We just want a direct mapping */

	ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	ipa->smem_iova = iova;
	ipa->smem_size = size;

	return 0;
}

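/* Inverse of ipa_smem_init() */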
static void ipa_smem_exit(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (domain) {
		size_t size;

		size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
		if (size != ipa->smem_size)
			dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
				 size, ipa->smem_size);
	} else {
		dev_err(dev, "couldn't get IPA IOMMU domain for SMEM\n");
	}

	ipa->smem_size = 0;
	ipa->smem_iova = 0;
}

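/* Perform memory region-related initialization */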
int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
	struct device *dev = &ipa->pdev->dev;
	struct resource *res;
	int ret;

	/* Make sure the set of defined memory regions is valid */
	if (!ipa_mem_valid(ipa, mem_data))
		return -EINVAL;

	ipa->mem_count = mem_data->local_count;
	ipa->mem = mem_data->local;

	ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "error %d setting DMA mask\n", ret);
		return ret;
	}

	res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
					   "ipa-shared");
	if (!res) {
		dev_err(dev,
			"DT error getting \"ipa-shared\" memory property\n");
		return -ENODEV;
	}

	ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
	if (!ipa->mem_virt) {
		dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
		return -ENOMEM;
	}

	ipa->mem_addr = res->start;
	ipa->mem_size = resource_size(res);

	ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
	if (ret)
		goto err_unmap;

	ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
	if (ret)
		goto err_imem_exit;

	return 0;

err_imem_exit:
	ipa_imem_exit(ipa);
err_unmap:
	memunmap(ipa->mem_virt);

	return ret;
}

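/* Inverse of ipa_mem_init() */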
void ipa_mem_exit(struct ipa *ipa)
{
	ipa_smem_exit(ipa);
	ipa_imem_exit(ipa);
	memunmap(ipa->mem_virt);
}