/* IPA immediate commands
 *
 * Immediate commands are issued to the IPA hardware over the AP "command
 * TX" endpoint.  They are used (among other things) to initialize filter
 * and routing tables, load header table contents, write registers, and
 * copy data between system memory and IPA-local shared memory.  Each
 * command is described by a small payload structure defined below.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0x0,
	pipeline_clear_src_grp	= 0x1,
	pipeline_clear_full	= 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)

/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* Zero or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (IPA v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)

/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
	/* Route and filter table sizes must fit in the size fields of the
	 * ipa_cmd_hw_ip_fltrt_init structure.  Hashed and non-hashed tables
	 * have the same maximum size, so check both fields against the
	 * larger of the route and filter table sizes.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE

	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Valid endpoint numbers must fit in the IP packet init command */
	BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
		     IPA_ENDPOINT_MAX - 1);
}

/* Validate a memory region holding a filter or route table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = &ipa->pdev->dev;

	/* Size must fit in the immediate command field that holds it */
	if (mem->size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, " (0x%04x > 0x%04x)\n",
			mem->size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	/* Entire memory range must fit within IPA-local memory */
	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "%s table region out of range\n", table);
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, mem->size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command.  Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions.  The modem portion defines the base of the area, and
	 * its offset must fit in the immediate command field that holds it.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* The combined size must fit in the immediate command field */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	/* The entire combined area must fit in IPA memory */
	if (size > ipa->mem_size || offset > ipa->mem_size - size) {
		dev_err(dev, "header table region out of range\n");
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			offset, size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  A 16 bit offset is always supported, and
	 * starting with IPA v4.0 additional high-order bits are carried in
	 * a field of the flags word.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);

	/* Make sure the offset can be represented by the field(s)
	 * that holds it.  Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}

/* Check whether offsets passed to register_write commands are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register.  If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command.  Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
	if (!ipa_cmd_header_valid(ipa))
		return false;

	if (!ipa_cmd_register_write_valid(ipa))
		return false;

	return true;
}

/* Initialize the DMA pool used for command payloads on this channel */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Command payloads are allocated one at a time, but a single
	 * transaction can require up to the maximum supported by the
	 * channel; treat them as if they were allocated all at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->trans_tre_max);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

/* Allocate a command payload from the command TX channel's DMA pool */
static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and size are encoded only if it's used */
	if (hash_size) {
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header table resides.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* Prior to IPA v4.0 the clear options are encoded in the payload;
	 * for IPA v4.0+ they are encoded in the opcode.  IPA v4.0+ also
	 * supports a wider offset, with the high-order bits carried in
	 * the flags field of the payload.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options; SKIP_CLEAR stays 0 */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high-order 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode the high-order offset bits in the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */
	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
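
/* Example (illustrative sketch, not part of this file): a typical caller
 * adds a register write to an existing command transaction.  The flush of
 * the filter/route hash tables looks roughly like the following, where
 * "trans" and "val" are assumed to have been set up by the caller:
 *
 *	u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
 *
 *	ipa_cmd_register_write_add(trans, offset, val, val, false);
 *
 * Passing the same value for "value" and "mask" writes exactly those bits
 * and leaves all others unchanged.
 */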

/* Skip IP packet processing on the next data transfer on the TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint = u8_encode_bits(endpoint_id,
						IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read is left 0; the atomic clear-after-read
	 * feature (IPA v4.0+) is not used here.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting with IPA v4.0 the remaining flag fields are reserved;
	 * either way they are all zero here:
	 *   skip_clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * For IPA v4.0+ those values would instead be encoded in the
	 * opcode, but since both are 0 there's no need to do so.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline.  The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing.  The tag_status
	 * command requests that status be generated when that packet
	 * arrives, and the transfer command sends a small packet of
	 * data (rather than a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}

/* Returns the number of commands required to clear the pipeline */
u32 ipa_cmd_pipeline_clear_count(void)
{
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}
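
/* Example (illustrative sketch, not part of this file): a caller that
 * needs to flush the hardware pipeline typically allocates a command
 * transaction sized by ipa_cmd_pipeline_clear_count(), adds the clear
 * commands, commits the transaction, then waits for the tagged transfer
 * to arrive:
 *
 *	u32 count = ipa_cmd_pipeline_clear_count();
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, count);
 *	if (trans) {
 *		ipa_cmd_pipeline_clear_add(trans);
 *		gsi_trans_commit_wait(trans);
 *		ipa_cmd_pipeline_clear_wait(ipa);
 *	}
 */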

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}