/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if the list of mappings is not empty? */
	mutex_destroy(&cache->lock);
}
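
/*
 * Example (illustrative sketch, not part of the API): a driver that keeps its
 * own cache of mappings pairs host1x_bo_cache_init() with
 * host1x_bo_cache_destroy() over the lifetime of its private structure. The
 * "foo" identifiers below are hypothetical.
 *
 *	struct foo {
 *		struct host1x_bo_cache cache;
 *	};
 *
 *	static int foo_init(struct foo *foo)
 *	{
 *		host1x_bo_cache_init(&foo->cache);
 *		return 0;
 *	}
 *
 *	static void foo_cleanup(struct foo *foo)
 *	{
 *		// all cached mappings must have been released by this point
 *		host1x_bo_cache_destroy(&foo->cache);
 *	}
 */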

/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
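
/*
 * Example (illustrative sketch): an engine driver typically embeds a
 * struct host1x_client in its private structure, recovers it with
 * container_of() inside the callbacks and points @ops at its implementation
 * of host1x_client_ops. All "foo" identifiers are hypothetical.
 *
 *	struct foo {
 *		struct host1x_client client;
 *	};
 *
 *	static int foo_init(struct host1x_client *client)
 *	{
 *		struct foo *foo = container_of(client, struct foo, client);
 *
 *		// request channels, syncpoints, etc. for foo here
 *		return 0;
 *	}
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_init,
 *	};
 */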

/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
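
/*
 * Example (illustrative sketch): pinning a buffer object for DMA by an engine
 * and releasing the mapping again. "dev" and "bo" are assumed to be provided
 * by the caller; passing a NULL cache is assumed to bypass mapping caching.
 *
 *	struct host1x_bo_mapping *map;
 *
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, NULL);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	// program map->phys into the engine's command stream ...
 *
 *	host1x_bo_unpin(map);
 */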

/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
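
/*
 * Example (illustrative sketch): requesting a client-managed syncpoint,
 * incrementing it and waiting for the new value. "client" is assumed to be an
 * already registered host1x client; the timeout value is arbitrary.
 *
 *	struct host1x_syncpt *sp;
 *	u32 value;
 *	int err;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	err = host1x_syncpt_incr(sp);
 *	if (err < 0)
 *		goto put;
 *
 *	err = host1x_syncpt_wait(sp, host1x_syncpt_read_max(sp),
 *				 msecs_to_jiffies(100), &value);
 *
 * put:
 *	host1x_syncpt_put(sp);
 *	return err;
 */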

/*
 * host1x channels
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
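
/*
 * Example (illustrative sketch): a client typically requests a channel once
 * at initialization time and releases it again on teardown. "client" is
 * assumed to be the driver's embedded struct host1x_client.
 *
 *	client->channel = host1x_channel_request(client);
 *	if (!client->channel)
 *		return -ENOMEM;
 *
 *	// ... submit jobs on the channel ...
 *
 *	host1x_channel_put(client->channel);
 */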

/*
 * host1x jobs
 */

#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/*
	 * Memory context that the job is executed in, if context isolation
	 * is supported and enabled for the engine.
	 */
	struct host1x_memory_context *memory_context;

	/* Stream ID to program if no memory context is attached to the job */
	u32 engine_fallback_streamid;

	/* Engine MMIO offset of the register that holds the stream ID */
	u32 engine_streamid_offset;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
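
/*
 * Example (illustrative sketch): building and submitting a job with a single
 * gather. "channel", "sp", "bo", "num_words" and "client" are assumed to have
 * been set up by the caller; only the basic error paths are shown.
 *
 *	struct host1x_job *job;
 *	int err;
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	job->syncpt = host1x_syncpt_get(sp);
 *	job->syncpt_incrs = 1;
 *	job->class = client->class;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, client->dev);
 *	if (err)
 *		goto put;
 *
 *	err = host1x_job_submit(job);
 *	if (err)
 *		host1x_job_unpin(job);
 *
 * put:
 *	host1x_job_put(job);
 *	return err;
 */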

/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
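
/*
 * Example (illustrative sketch): registering a host1x logical device driver
 * from module init. All "foo" identifiers and the compatible string are
 * hypothetical; @subdevs lists the engines the logical device is composed of.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "vendor,foo-engine" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.subdevs = foo_subdevs,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return host1x_driver_register(&foo_driver);
 *	}
 *	module_init(foo_module_init);
 */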

struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

int host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
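
/*
 * Example (illustrative sketch): an engine's platform driver fills in its
 * embedded host1x client and registers it from probe. The "foo" structure
 * and foo_client_ops are hypothetical; the class value is just an example.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo;
 *		int err;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		INIT_LIST_HEAD(&foo->client.list);
 *		foo->client.ops = &foo_client_ops;
 *		foo->client.dev = &pdev->dev;
 *		foo->client.class = HOST1X_CLASS_VIC;
 *
 *		err = host1x_client_register(&foo->client);
 *		if (err < 0)
 *			return err;
 *
 *		platform_set_drvdata(pdev, foo);
 *		return 0;
 *	}
 */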

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
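
/*
 * Example (illustrative sketch): a DSI/CSI driver obtains a MIPI device for
 * its pads and runs pad calibration around bringing up the link. "dev" and
 * "np" are assumed to be the driver's device and its device tree node; most
 * error handling is omitted for brevity.
 *
 *	struct tegra_mipi_device *mipi;
 *
 *	mipi = tegra_mipi_request(dev, np);
 *	if (IS_ERR(mipi))
 *		return PTR_ERR(mipi);
 *
 *	tegra_mipi_enable(mipi);
 *	tegra_mipi_start_calibration(mipi);
 *
 *	// ... bring up the link so that calibration can complete ...
 *
 *	tegra_mipi_finish_calibration(mipi);
 *
 *	// on teardown:
 *	tegra_mipi_disable(mipi);
 *	tegra_mipi_free(mipi);
 */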

/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									 struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
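
/*
 * Example (illustrative sketch): drivers that support context isolation can
 * allocate a memory context per process and attach it to the jobs submitted
 * on behalf of that process. "host1x", "pid" and "job" are assumed to be
 * provided by the caller; reference handling beyond the basics is omitted.
 *
 *	struct host1x_memory_context *ctx;
 *
 *	ctx = host1x_memory_context_alloc(host1x, pid);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	job->memory_context = ctx;
 *
 *	// ... submit the job ...
 *
 *	host1x_memory_context_put(ctx);
 */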

#endif