#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

#include <drm/drm_print.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"

#if defined(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#else
/* Stubs so the IS_ENABLED()-guarded calls below still compile. */
#define arm_iommu_create_mapping(...)	({ NULL; })
#define arm_iommu_attach_device(...)	({ -ENODEV; })
#define arm_iommu_release_mapping(...)	({ })
#define arm_iommu_detach_device(...)	({ })
#define to_dma_iommu_mapping(dev)	NULL
#endif

#if !defined(CONFIG_IOMMU_DMA)
#define iommu_dma_init_domain(...) ({ -EINVAL; })
#endif

/* IOVA range used for the ARM DMA-IOMMU mapping. */
#define EXYNOS_DEV_ADDR_START	0x20000000
#define EXYNOS_DEV_ADDR_SIZE	0x40000000
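
/*
 * drm_iommu_attach_device - attach a sub-device to the Exynos IOMMU mapping
 * @drm_dev: DRM device
 * @subdrv_dev: device to be attached
 * @dma_priv: stores the sub-device's original ARM DMA mapping so it can be
 *            restored on detach
 *
 * Called for each sub-driver device so that its DMA transactions go through
 * the common IOMMU mapping owned by the Exynos DRM DMA device.
 */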
static int drm_iommu_attach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev, void **dma_priv)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret = 0;

	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
		DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
			      dev_name(subdrv_dev));
		return -EINVAL;
	}

	dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		/*
		 * Save the sub-device's original ARM DMA mapping and detach
		 * from it before attaching to the shared Exynos DRM mapping;
		 * the saved mapping is restored in drm_iommu_detach_device().
		 */
		*dma_priv = to_dma_iommu_mapping(subdrv_dev);
		if (*dma_priv)
			arm_iommu_detach_device(subdrv_dev);

		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = iommu_attach_device(priv->mapping, subdrv_dev);
	}

	return ret;
}
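
/*
 * drm_iommu_detach_device - detach a sub-device from the Exynos IOMMU mapping
 * @drm_dev: DRM device
 * @subdrv_dev: device to be detached
 * @dma_priv: the sub-device's original ARM DMA mapping saved at attach time
 *
 * Called when a sub-driver device is unregistered; restores the mapping the
 * device had before it was attached to the common Exynos DRM mapping.
 */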
static void drm_iommu_detach_device(struct drm_device *drm_dev,
				    struct device *subdrv_dev, void **dma_priv)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		arm_iommu_detach_device(subdrv_dev);
		arm_iommu_attach_device(subdrv_dev, *dma_priv);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		iommu_detach_device(priv->mapping, subdrv_dev);
	}
}
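
/*
 * exynos_drm_register_dma - register a device for DMA with the Exynos DRM core
 *
 * The first device registered is remembered as the device used for DMA
 * mapping operations. When CONFIG_EXYNOS_IOMMU is enabled, the shared IOMMU
 * mapping is created on first use and the device is attached to it.
 */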
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
			    void **dma_priv)
{
	struct exynos_drm_private *priv = drm->dev_private;

	if (!priv->dma_dev) {
		priv->dma_dev = dev;
		DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
			 dev_name(dev));
	}

	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		return 0;

	if (!priv->mapping) {
		void *mapping;

		if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
			mapping = arm_iommu_create_mapping(&platform_bus_type,
				EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
		else if (IS_ENABLED(CONFIG_IOMMU_DMA))
			mapping = iommu_get_domain_for_dev(priv->dma_dev);
		else
			mapping = ERR_PTR(-ENODEV);

		if (IS_ERR(mapping))
			return PTR_ERR(mapping);
		priv->mapping = mapping;
	}

	return drm_iommu_attach_device(drm, dev, dma_priv);
}

void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
			       void **dma_priv)
{
	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		drm_iommu_detach_device(drm, dev, dma_priv);
}

void exynos_drm_cleanup_dma(struct drm_device *drm)
{
	struct exynos_drm_private *priv = drm->dev_private;

	if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
		return;

	arm_iommu_release_mapping(priv->mapping);
	priv->mapping = NULL;
	priv->dma_dev = NULL;
}