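/*
 * IOMMU API driver for the MMUs found on Rockchip SoCs.  Two page-table /
 * descriptor formats are supported, selected via the "rockchip,iommu" and
 * "rockchip,rk3568-iommu" compatibles (see rk_iommu_dt_ids at the bottom).
 */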
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

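/* MMU register offsets */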
#define RK_MMU_DTE_ADDR		0x00
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C
#define RK_MMU_ZAP_ONE_LINE	0x10
#define RK_MMU_INT_RAWSTAT	0x14
#define RK_MMU_INT_CLEAR	0x18
#define RK_MMU_INT_MASK		0x1C
#define RK_MMU_INT_STATUS	0x20
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

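/* RK_MMU_STATUS fields */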
#define RK_MMU_STATUS_PAGING_ENABLED		BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE		BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE		BIT(2)
#define RK_MMU_STATUS_IDLE			BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY	BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE	BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE		BIT(31)

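/* RK_MMU_COMMAND command values */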
#define RK_MMU_CMD_ENABLE_PAGING	0
#define RK_MMU_CMD_DISABLE_PAGING	1
#define RK_MMU_CMD_ENABLE_STALL		2
#define RK_MMU_CMD_DISABLE_STALL	3
#define RK_MMU_CMD_ZAP_CACHE		4
#define RK_MMU_CMD_PAGE_FAULT_DONE	5
#define RK_MMU_CMD_FORCE_RESET		6

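/* RK_MMU_INT_* register fields */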
#define RK_MMU_IRQ_PAGE_FAULT	0x01
#define RK_MMU_IRQ_BUS_ERROR	0x02
#define RK_MMU_IRQ_MASK		(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES	1024
#define NUM_PT_ENTRIES	1024

#define SPAGE_ORDER	12
#define SPAGE_SIZE	(1 << SPAGE_ORDER)

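/*
 * 0x007ff000 sets bits 12..22: every power-of-two size from 4 KiB (one PTE)
 * up to 4 MiB (one full page table) can be handed to rk_iommu_map() in a
 * single call.
 */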
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000

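/*
 * A domain owns one page directory table (dt).  iommus is the list of
 * rk_iommu instances currently attached to the domain; iommus_lock and
 * dt_lock protect that list and the directory/page tables respectively.
 */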
struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt;
	dma_addr_t dt_dma;
	spinlock_t iommus_lock;
	spinlock_t dt_lock;

	struct iommu_domain domain;
};

static const char * const rk_iommu_clocks[] = {
	"aclk", "iface",
};

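/*
 * rk_iommu_ops abstracts the differences between the two descriptor formats:
 * how a DTE/PTE encodes a physical address, and how the directory table base
 * is exchanged with the RK_MMU_DTE_ADDR register.
 */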
struct rk_iommu_ops {
	phys_addr_t (*pt_address)(u32 dte);
	u32 (*mk_dtentries)(dma_addr_t pt_dma);
	u32 (*mk_ptentries)(phys_addr_t page, int prot);
	phys_addr_t (*dte_addr_phys)(u32 addr);
	u32 (*dma_addr_dte)(dma_addr_t dt_dma);
	u64 dma_bit_mask;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int num_irq;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node;
	struct iommu_domain *domain;
	struct iommu_group *group;
};

struct rk_iommudata {
	struct device_link *link;
	struct rk_iommu *iommu;
};

static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32);

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

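/*
 * The IOMMU uses a two-level page table.  The first level is a single 4 KiB
 * Directory Table (DT) of NUM_DT_ENTRIES 32-bit Directory Table Entries
 * (DTEs), each pointing to a Page Table.  Each 4 KiB Page Table (PT) holds
 * NUM_PT_ENTRIES 32-bit Page Table Entries (PTEs), each mapping one
 * SPAGE_SIZE (4 KiB) page.  A DTE carries the physical address of its PT in
 * bits 31:12 plus a valid bit in bit 0.
 */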
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

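/*
 * In the v2 ("rockchip,rk3568-iommu") format a DTE still fits in 32 bits but
 * can address a 40-bit physical page table: bits 39:36 and 35:32 of the PT
 * address are folded into DTE bits 7:4 and 11:8, with bits 31:12 unchanged.
 */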
#define RK_DTE_PT_ADDRESS_MASK_V2 GENMASK_ULL(31, 4)
#define DTE_HI_MASK1	GENMASK(11, 8)
#define DTE_HI_MASK2	GENMASK(7, 4)
#define DTE_HI_SHIFT1	24
#define DTE_HI_SHIFT2	32
#define PAGE_DESC_HI_MASK1	GENMASK_ULL(35, 32)
#define PAGE_DESC_HI_MASK2	GENMASK_ULL(39, 36)

static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
	u64 dte_v2 = dte;

	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << DTE_HI_SHIFT2) |
		 ((dte_v2 & DTE_HI_MASK1) << DTE_HI_SHIFT1) |
		 (dte_v2 & RK_DTE_PT_ADDRESS_MASK);

	return (phys_addr_t)dte_v2;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
	pt_dma = (pt_dma & RK_DTE_PT_ADDRESS_MASK) |
		 ((pt_dma & PAGE_DESC_HI_MASK1) >> DTE_HI_SHIFT1) |
		 (pt_dma & PAGE_DESC_HI_MASK2) >> DTE_HI_SHIFT2;

	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}

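/*
 * Each PTE maps one 4 KiB page: bits 31:12 hold the page address, bits 8:1
 * are permission/attribute flags (of which this driver uses "readable" and
 * "writable"), and bit 0 marks the entry valid.
 */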
#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK		0x000001fe
#define RK_PTE_PAGE_WRITABLE		BIT(2)
#define RK_PTE_PAGE_READABLE		BIT(1)
#define RK_PTE_PAGE_VALID		BIT(0)

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;
	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

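/*
 * v2 PTEs pack the upper physical address bits the same way as v2 DTEs
 * (rk_mk_pte_v2() reuses rk_mk_dte_v2()), and the readable/writable flag
 * bits are swapped relative to the v1 layout.
 */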
#define RK_PTE_PAGE_READABLE_V2      BIT(2)
#define RK_PTE_PAGE_WRITABLE_V2      BIT(1)

static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;

	return rk_mk_dte_v2(page) | flags;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}

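/*
 * A 32-bit IOVA is split into a directory index, a page-table index and a
 * page offset:
 *   31:22 - DTE index (10 bits)
 *   21:12 - PTE index (10 bits)
 *   11: 0 - page offset (12 bits)
 */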
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;

	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

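	/*
	 * Check that the RK_MMU_DTE_ADDR register is working by writing
	 * DTE_ADDR_DUMMY and reading it back before issuing FORCE_RESET.
	 */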
	for (i = 0; i < iommu->num_mmu; i++) {
		dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);

		if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static inline phys_addr_t rk_dte_addr_phys(u32 addr)
{
	return (phys_addr_t)addr;
}

static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
{
	return dt_dma;
}

#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DTE_BASE_HI_MASK GENMASK(11, 4)
#define DT_SHIFT 28

static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
{
	u64 addr64 = addr;

	return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
	       ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
}

static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
{
	return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
	       ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_ops->pt_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i, err;

	err = pm_runtime_get_if_in_use(iommu->dev);
	if (!err || WARN_ON_ONCE(err < 0))
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

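			/*
			 * Report the fault to any registered handler; the
			 * return value is ignored because the cache is zapped
			 * and the fault cleared below regardless.
			 */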
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);

out:
	pm_runtime_put(iommu->dev);
	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_ops->pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;
		int ret;

		iommu = list_entry(pos, struct rk_iommu, node);

		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		dev_err(dma_dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_ops->mk_dtentries(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_ops->pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];
		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

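	/*
	 * Zap only the first and the last iova of the range: only those pages
	 * can share a dte or pte with an existing mapping, so only their
	 * IOTLB lines can hold stale entries.
	 */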
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_ops->pt_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];

	pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];

	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_ops->pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	return data ? data->iommu : NULL;
}

static void rk_iommu_disable(struct rk_iommu *iommu)
{
	int i;

	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}

static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_ops->dma_addr_dte(rk_domain->dt_dma));
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	dev_dbg(dev, "Detaching from iommu domain\n");

	if (iommu->domain != domain)
		return;

	iommu->domain = NULL;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON_ONCE(ret < 0);
	if (ret > 0) {
		rk_iommu_disable(iommu);
		pm_runtime_put(iommu->dev);
	}
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret;

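	/*
	 * A master that has no rk_iommudata behind it has no MMU for this
	 * driver to program; report success so such devices can still be
	 * attached to the domain.
	 */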
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	dev_dbg(dev, "Attaching to iommu domain\n");

	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	if (!dma_dev)
		return NULL;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

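	/*
	 * The directory table is a single 4 KiB page of 1024 32-bit entries;
	 * each page table allocated later has the same geometry.  GFP_DMA32
	 * keeps the tables in memory both descriptor formats can address.
	 */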
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_free_domain;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
		dev_err(dma_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_free_domain:
	kfree(rk_domain);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];
		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_ops->pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);
			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	kfree(rk_domain);
}

static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
	struct rk_iommudata *data;
	struct rk_iommu *iommu;

	data = dev_iommu_priv_get(dev);
	if (!data)
		return ERR_PTR(-ENODEV);

	iommu = rk_iommu_from_dev(dev);

	data->link = device_link_add(dev, iommu->dev,
				     DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

	return &iommu->iommu;
}

static void rk_iommu_release_device(struct device *dev)
{
	struct rk_iommudata *data = dev_iommu_priv_get(dev);

	device_link_del(data->link);
}

static struct iommu_group *rk_iommu_device_group(struct device *dev)
{
	struct rk_iommu *iommu;

	iommu = rk_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int rk_iommu_of_xlate(struct device *dev,
			     struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	dev_iommu_priv_set(dev, data);

	platform_device_put(iommu_dev);

	return 0;
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.probe_device = rk_iommu_probe_device,
	.release_device = rk_iommu_release_device,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = rk_iommu_attach_device,
		.detach_dev = rk_iommu_detach_device,
		.map = rk_iommu_map,
		.unmap = rk_iommu_unmap,
		.iova_to_phys = rk_iommu_iova_to_phys,
		.free = rk_iommu_domain_free,
	}
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	const struct rk_iommu_ops *ops;
	int num_res = pdev->num_resources;
	int err, i;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	ops = of_device_get_match_data(dev);
	if (!rk_ops)
		rk_ops = ops;

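	/*
	 * rk_ops is shared by every IOMMU instance, so all instances in the
	 * system must use the same descriptor format; mixing the two match
	 * variants is rejected.
	 */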
	if (WARN_ON(rk_ops != ops))
		return -EINVAL;

	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
		return -ENOMEM;

	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];

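	/*
	 * Older devicetrees do not list the IOMMU clocks, so treat a missing
	 * clock set (-ENOENT) as "nothing to manage" rather than failing the
	 * probe.
	 */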
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
	if (err == -ENOENT)
		iommu->num_clocks = 0;
	else if (err)
		return err;

	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	if (err)
		return err;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
		goto err_unprepare_clocks;
	}

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		goto err_put_group;

	err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
	if (err)
		goto err_remove_sysfs;

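	/*
	 * Page tables of every domain are mapped for DMA through a single
	 * device; the first IOMMU to probe provides it.
	 */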
	if (!dma_dev)
		dma_dev = &pdev->dev;

	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);

	pm_runtime_enable(dev);

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err) {
			pm_runtime_disable(dev);
			goto err_remove_sysfs;
		}
	}

	dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);

	return 0;
err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
	iommu_group_put(iommu->group);
err_unprepare_clocks:
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < iommu->num_irq; i++) {
		int irq = platform_get_irq(pdev, i);

		devm_free_irq(iommu->dev, irq, iommu);
	}

	pm_runtime_force_suspend(&pdev->dev);
}

static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}

static const struct dev_pm_ops rk_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static struct rk_iommu_ops iommu_data_ops_v1 = {
	.pt_address = &rk_dte_pt_address,
	.mk_dtentries = &rk_mk_dte,
	.mk_ptentries = &rk_mk_pte,
	.dte_addr_phys = &rk_dte_addr_phys,
	.dma_addr_dte = &rk_dma_addr_dte,
	.dma_bit_mask = DMA_BIT_MASK(32),
};

static struct rk_iommu_ops iommu_data_ops_v2 = {
	.pt_address = &rk_dte_pt_address_v2,
	.mk_dtentries = &rk_mk_dte_v2,
	.mk_ptentries = &rk_mk_pte_v2,
	.dte_addr_phys = &rk_dte_addr_phys_v2,
	.dma_addr_dte = &rk_dma_addr_dte_v2,
	.dma_bit_mask = DMA_BIT_MASK(40),
};

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu",
	  .data = &iommu_data_ops_v1,
	},
	{ .compatible = "rockchip,rk3568-iommu",
	  .data = &iommu_data_ops_v2,
	},
	{ }
};

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .pm = &rk_iommu_pm_ops,
		   .suppress_bind_attrs = true,
	},
};
builtin_platform_driver(rk_iommu_driver);