0001
0002
0003
0004
0005
0006 #include <linux/device.h>
0007 #include <linux/kref.h>
0008 #include <linux/of.h>
0009 #include <linux/of_platform.h>
0010 #include <linux/pid.h>
0011 #include <linux/slab.h>
0012
0013 #include "context.h"
0014 #include "dev.h"
0015
0016 int host1x_memory_context_list_init(struct host1x *host1x)
0017 {
0018 struct host1x_memory_context_list *cdl = &host1x->context_list;
0019 struct device_node *node = host1x->dev->of_node;
0020 struct host1x_memory_context *ctx;
0021 unsigned int i;
0022 int err;
0023
0024 cdl->devs = NULL;
0025 cdl->len = 0;
0026 mutex_init(&cdl->lock);
0027
0028 err = of_property_count_u32_elems(node, "iommu-map");
0029 if (err < 0)
0030 return 0;
0031
0032 cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
0033 if (!cdl->devs)
0034 return -ENOMEM;
0035 cdl->len = err / 4;
0036
0037 for (i = 0; i < cdl->len; i++) {
0038 struct iommu_fwspec *fwspec;
0039
0040 ctx = &cdl->devs[i];
0041
0042 ctx->host = host1x;
0043
0044 device_initialize(&ctx->dev);
0045
0046
0047
0048
0049
0050 ctx->dma_mask = DMA_BIT_MASK(38);
0051 ctx->dev.dma_mask = &ctx->dma_mask;
0052 ctx->dev.coherent_dma_mask = ctx->dma_mask;
0053 dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
0054 ctx->dev.bus = &host1x_context_device_bus_type;
0055 ctx->dev.parent = host1x->dev;
0056
0057 dma_set_max_seg_size(&ctx->dev, UINT_MAX);
0058
0059 err = device_add(&ctx->dev);
0060 if (err) {
0061 dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
0062 goto del_devices;
0063 }
0064
0065 err = of_dma_configure_id(&ctx->dev, node, true, &i);
0066 if (err) {
0067 dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
0068 i, err);
0069 device_del(&ctx->dev);
0070 goto del_devices;
0071 }
0072
0073 fwspec = dev_iommu_fwspec_get(&ctx->dev);
0074 if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
0075 dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
0076 device_del(&ctx->dev);
0077 goto del_devices;
0078 }
0079
0080 ctx->stream_id = fwspec->ids[0] & 0xffff;
0081 }
0082
0083 return 0;
0084
0085 del_devices:
0086 while (i--)
0087 device_del(&cdl->devs[i].dev);
0088
0089 kfree(cdl->devs);
0090 cdl->len = 0;
0091
0092 return err;
0093 }
0094
0095 void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
0096 {
0097 unsigned int i;
0098
0099 for (i = 0; i < cdl->len; i++)
0100 device_del(&cdl->devs[i].dev);
0101
0102 kfree(cdl->devs);
0103 cdl->len = 0;
0104 }
0105
0106 struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
0107 struct pid *pid)
0108 {
0109 struct host1x_memory_context_list *cdl = &host1x->context_list;
0110 struct host1x_memory_context *free = NULL;
0111 int i;
0112
0113 if (!cdl->len)
0114 return ERR_PTR(-EOPNOTSUPP);
0115
0116 mutex_lock(&cdl->lock);
0117
0118 for (i = 0; i < cdl->len; i++) {
0119 struct host1x_memory_context *cd = &cdl->devs[i];
0120
0121 if (cd->owner == pid) {
0122 refcount_inc(&cd->ref);
0123 mutex_unlock(&cdl->lock);
0124 return cd;
0125 } else if (!cd->owner && !free) {
0126 free = cd;
0127 }
0128 }
0129
0130 if (!free) {
0131 mutex_unlock(&cdl->lock);
0132 return ERR_PTR(-EBUSY);
0133 }
0134
0135 refcount_set(&free->ref, 1);
0136 free->owner = get_pid(pid);
0137
0138 mutex_unlock(&cdl->lock);
0139
0140 return free;
0141 }
0142 EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
0143
/*
 * host1x_memory_context_get() - take an additional reference to a context
 * @cd: memory context
 *
 * The caller must already hold a reference; drop it with
 * host1x_memory_context_put().
 */
void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);
0149
/*
 * host1x_memory_context_put() - drop a reference to a memory context
 * @cd: memory context
 *
 * When the last reference is dropped, the context's PID ownership is
 * released so host1x_memory_context_alloc() can hand the slot to another
 * process.
 */
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

	/*
	 * refcount_dec_and_mutex_lock() only takes the list lock when the
	 * count actually hits zero, so the ownership release below happens
	 * exactly once and is serialized against concurrent alloc().
	 */
	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);