0001
0002
0003
0004 #include <linux/host1x.h>
0005 #include <linux/iommu.h>
0006 #include <linux/list.h>
0007
0008 #include <drm/drm_drv.h>
0009 #include <drm/drm_file.h>
0010 #include <drm/drm_utils.h>
0011
0012 #include "drm.h"
0013 #include "uapi.h"
0014
0015 static void tegra_drm_mapping_release(struct kref *ref)
0016 {
0017 struct tegra_drm_mapping *mapping =
0018 container_of(ref, struct tegra_drm_mapping, ref);
0019
0020 host1x_bo_unpin(mapping->map);
0021 host1x_bo_put(mapping->bo);
0022
0023 kfree(mapping);
0024 }
0025
0026 void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
0027 {
0028 kref_put(&mapping->ref, tegra_drm_mapping_release);
0029 }
0030
0031 static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
0032 {
0033 struct tegra_drm_mapping *mapping;
0034 unsigned long id;
0035
0036 if (context->memory_context)
0037 host1x_memory_context_put(context->memory_context);
0038
0039 xa_for_each(&context->mappings, id, mapping)
0040 tegra_drm_mapping_put(mapping);
0041
0042 xa_destroy(&context->mappings);
0043
0044 host1x_channel_put(context->channel);
0045
0046 kfree(context);
0047 }
0048
0049 void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
0050 {
0051 struct tegra_drm_context *context;
0052 struct host1x_syncpt *sp;
0053 unsigned long id;
0054
0055 xa_for_each(&file->contexts, id, context)
0056 tegra_drm_channel_context_close(context);
0057
0058 xa_for_each(&file->syncpoints, id, sp)
0059 host1x_syncpt_put(sp);
0060
0061 xa_destroy(&file->contexts);
0062 xa_destroy(&file->syncpoints);
0063 }
0064
0065 static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
0066 {
0067 struct tegra_drm_client *client;
0068
0069 list_for_each_entry(client, &tegra->clients, list)
0070 if (client->base.class == class)
0071 return client;
0072
0073 return NULL;
0074 }
0075
/*
 * DRM_IOCTL_TEGRA_CHANNEL_OPEN: open a channel context for the engine
 * identified by args->host1x_class.
 *
 * On success, args->context holds the new context handle, args->version
 * the client's version, and args->capabilities any capability flags.
 * Returns 0 on success or a negative error code.
 */
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	/* No flags are defined for this ioctl yet. */
	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		/* Engine multiplexes all users onto one channel. */
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		/* Engine hands out dedicated channels; may run out. */
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}


	/*
	 * Attach a per-process memory context when the engine is behind an
	 * IOMMU and the client supports context isolation.
	 */
	if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
		bool supported;

		err = client->ops->can_use_memory_ctx(client, &supported);
		if (err)
			goto put_channel;

		if (supported)
			context->memory_context = host1x_memory_context_alloc(
				host, get_task_pid(current, PIDTYPE_TGID));

		/*
		 * NOTE: if !supported, memory_context is still NULL from
		 * kzalloc(), so IS_ERR() below is false and we fall through.
		 */
		if (IS_ERR(context->memory_context)) {
			if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
				err = PTR_ERR(context->memory_context);
				goto put_channel;
			} else {
				/*
				 * -EOPNOTSUPP means the host1x instance has no
				 * context isolation support; proceed without a
				 * memory context rather than failing the open.
				 */
				context->memory_context = NULL;
			}
		}
	}

	/* Handle 0 is reserved, hence the XA_LIMIT starting at 1. */
	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_memctx;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_memctx:
	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);
put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}
0161
0162 int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
0163 {
0164 struct tegra_drm_file *fpriv = file->driver_priv;
0165 struct drm_tegra_channel_close *args = data;
0166 struct tegra_drm_context *context;
0167
0168 mutex_lock(&fpriv->lock);
0169
0170 context = xa_load(&fpriv->contexts, args->context);
0171 if (!context) {
0172 mutex_unlock(&fpriv->lock);
0173 return -EINVAL;
0174 }
0175
0176 xa_erase(&fpriv->contexts, args->context);
0177
0178 mutex_unlock(&fpriv->lock);
0179
0180 tegra_drm_channel_context_close(context);
0181
0182 return 0;
0183 }
0184
/*
 * DRM_IOCTL_TEGRA_CHANNEL_MAP: pin the GEM object args->handle into the
 * address space used by the context's engine (the memory context's device
 * when one exists, otherwise the engine device itself).
 *
 * On success, args->mapping receives a handle for the new mapping.
 * Returns 0 on success or a negative error code.
 */
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	enum dma_data_direction direction;
	struct device *mapping_dev;
	int err = 0;

	/* Only the READ/WRITE access bits are valid flags. */
	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

	/* Pin against the per-process memory context device when present. */
	if (context->memory_context)
		mapping_dev = &context->memory_context->dev;
	else
		mapping_dev = context->client->base.dev;

	/* Takes a reference on the GEM object; dropped on release. */
	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

	/*
	 * Map the UAPI access flags onto a DMA direction. Directions are
	 * from the device's point of view: engine writes = DMA_FROM_DEVICE,
	 * engine reads = DMA_TO_DEVICE.
	 */
	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
		direction = DMA_BIDIRECTIONAL;
		break;

	case DRM_TEGRA_CHANNEL_MAP_WRITE:
		direction = DMA_FROM_DEVICE;
		break;

	case DRM_TEGRA_CHANNEL_MAP_READ:
		direction = DMA_TO_DEVICE;
		break;

	default:
		/* Zero access flags: a mapping nobody can use is rejected. */
		err = -EINVAL;
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
	}

	mapping->iova = mapping->map->phys;
	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	/* Handle 0 is reserved, hence the XA_LIMIT starting at 1. */
	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unpin;

	mutex_unlock(&fpriv->lock);

	return 0;

unpin:
	host1x_bo_unpin(mapping->map);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
0271
0272 int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
0273 {
0274 struct tegra_drm_file *fpriv = file->driver_priv;
0275 struct drm_tegra_channel_unmap *args = data;
0276 struct tegra_drm_mapping *mapping;
0277 struct tegra_drm_context *context;
0278
0279 mutex_lock(&fpriv->lock);
0280
0281 context = xa_load(&fpriv->contexts, args->context);
0282 if (!context) {
0283 mutex_unlock(&fpriv->lock);
0284 return -EINVAL;
0285 }
0286
0287 mapping = xa_erase(&context->mappings, args->mapping);
0288
0289 mutex_unlock(&fpriv->lock);
0290
0291 if (!mapping)
0292 return -EINVAL;
0293
0294 tegra_drm_mapping_put(mapping);
0295 return 0;
0296 }
0297
0298 int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
0299 {
0300 struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
0301 struct tegra_drm_file *fpriv = file->driver_priv;
0302 struct drm_tegra_syncpoint_allocate *args = data;
0303 struct host1x_syncpt *sp;
0304 int err;
0305
0306 if (args->id)
0307 return -EINVAL;
0308
0309 sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
0310 if (!sp)
0311 return -EBUSY;
0312
0313 args->id = host1x_syncpt_id(sp);
0314
0315 err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
0316 if (err) {
0317 host1x_syncpt_put(sp);
0318 return err;
0319 }
0320
0321 return 0;
0322 }
0323
0324 int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
0325 {
0326 struct tegra_drm_file *fpriv = file->driver_priv;
0327 struct drm_tegra_syncpoint_allocate *args = data;
0328 struct host1x_syncpt *sp;
0329
0330 mutex_lock(&fpriv->lock);
0331 sp = xa_erase(&fpriv->syncpoints, args->id);
0332 mutex_unlock(&fpriv->lock);
0333
0334 if (!sp)
0335 return -EINVAL;
0336
0337 host1x_syncpt_put(sp);
0338
0339 return 0;
0340 }
0341
0342 int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
0343 {
0344 struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
0345 struct drm_tegra_syncpoint_wait *args = data;
0346 signed long timeout_jiffies;
0347 struct host1x_syncpt *sp;
0348
0349 if (args->padding != 0)
0350 return -EINVAL;
0351
0352 sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
0353 if (!sp)
0354 return -EINVAL;
0355
0356 timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);
0357
0358 return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
0359 }