#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/agp.h>

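/**
 * struct ttm_agp_backend - TTM backend state for AGP-bound pages.
 * @ttm: The embedded base TT object.
 * @mem: The AGP memory handle currently allocated for @ttm, or NULL.
 * @bridge: The AGP bridge the pages are bound through.
 */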
struct ttm_agp_backend {
	struct ttm_tt ttm;
	struct agp_memory *mem;
	struct agp_bridge_data *bridge;
};

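/**
 * ttm_agp_bind - Bind a TT object's pages into the AGP aperture.
 * @ttm: The TT object backed by this AGP backend.
 * @bo_mem: The resource describing the placement; @bo_mem->start is the
 *          page offset in the aperture at which to bind.
 *
 * Allocates an agp_memory handle covering all pages of @ttm, substituting
 * the global dummy read page for any slot that is not populated, and binds
 * it at the requested offset. Calling this on an already bound backend is
 * a no-op.
 *
 * Return: 0 on success, -ENOMEM if the AGP memory could not be allocated,
 * or the error code returned by agp_bind_memory().
 */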
int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
	struct page *dummy_read_page = ttm_glob.dummy_read_page;
	struct agp_memory *mem;
	int ret, cached = ttm->caching == ttm_cached;
	unsigned i;

	if (agp_be->mem)
		return 0;

	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
	if (unlikely(mem == NULL))
		return -ENOMEM;

	mem->page_count = 0;
	for (i = 0; i < ttm->num_pages; i++) {
		struct page *page = ttm->pages[i];

		if (!page)
			page = dummy_read_page;

		mem->pages[mem->page_count++] = page;
	}
	agp_be->mem = mem;

	mem->is_flushed = 1;
	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

	ret = agp_bind_memory(mem, bo_mem->start);
	if (ret)
		pr_err("AGP Bind memory failed\n");

	return ret;
}
EXPORT_SYMBOL(ttm_agp_bind);

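/**
 * ttm_agp_unbind - Unbind or release the AGP memory behind a TT object.
 * @ttm: The TT object to unbind.
 *
 * If the attached AGP memory is currently bound it is unbound from the
 * aperture; a handle that was never bound is freed instead.
 */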
void ttm_agp_unbind(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem) {
		if (agp_be->mem->is_bound) {
			agp_unbind_memory(agp_be->mem);
			return;
		}
		agp_free_memory(agp_be->mem);
		agp_be->mem = NULL;
	}
}
EXPORT_SYMBOL(ttm_agp_unbind);

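/**
 * ttm_agp_is_bound - Check whether a TT object has AGP memory attached.
 * @ttm: The TT object to query, may be NULL.
 *
 * Return: true if an agp_memory handle is attached to @ttm, false otherwise.
 */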
bool ttm_agp_is_bound(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be;

	if (!ttm)
		return false;

	agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
	return (agp_be->mem != NULL);
}
EXPORT_SYMBOL(ttm_agp_is_bound);

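/**
 * ttm_agp_destroy - Tear down an AGP-backed TT object.
 * @ttm: The TT object to destroy.
 *
 * Unbinds or frees any attached AGP memory, finalizes the embedded
 * &struct ttm_tt and frees the backend structure itself.
 */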
void ttm_agp_destroy(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem)
		ttm_agp_unbind(ttm);
	ttm_tt_fini(ttm);
	kfree(agp_be);
}
EXPORT_SYMBOL(ttm_agp_destroy);

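/**
 * ttm_agp_tt_create - Allocate and initialize an AGP-backed TT object.
 * @bo: The buffer object the TT will back.
 * @bridge: The AGP bridge the pages will be bound through.
 * @page_flags: TTM page flags, forwarded to ttm_tt_init().
 *
 * The embedded TT is initialized with write-combined caching, which is how
 * AGP aperture memory is normally mapped.
 *
 * Return: Pointer to the embedded &struct ttm_tt on success, NULL on
 * allocation or initialization failure.
 */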
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
				 struct agp_bridge_data *bridge,
				 uint32_t page_flags)
{
	struct ttm_agp_backend *agp_be;

	agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
	if (!agp_be)
		return NULL;

	agp_be->mem = NULL;
	agp_be->bridge = bridge;

	if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined, 0)) {
		kfree(agp_be);
		return NULL;
	}

	return &agp_be->ttm;
}
EXPORT_SYMBOL(ttm_agp_tt_create);
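
/*
 * Usage sketch (illustrative, not part of this file): an AGP-capable driver
 * typically wires these helpers into its &struct ttm_device_funcs callbacks.
 * The "mydrv_*" names and the agp_bridge field below are hypothetical; the
 * ttm_agp_* calls are the entry points exported above.
 *
 *	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
 *						  uint32_t page_flags)
 *	{
 *		struct mydrv_device *mydrv = mydrv_device_from_bo(bo);
 *
 *		return ttm_agp_tt_create(bo, mydrv->agp_bridge, page_flags);
 *	}
 *
 *	static void mydrv_ttm_tt_destroy(struct ttm_device *bdev,
 *					 struct ttm_tt *ttm)
 *	{
 *		ttm_agp_destroy(ttm);
 *	}
 *
 * ttm_agp_bind() and ttm_agp_unbind() are then called from the driver's
 * move/bind paths with the &struct ttm_resource describing the AGP placement.
 */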