0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031 #include "vmwgfx_drv.h"
0032 #include <drm/ttm/ttm_bo_driver.h>
0033 #include <drm/ttm/ttm_placement.h>
0034 #include <linux/idr.h>
0035 #include <linux/spinlock.h>
0036 #include <linux/kernel.h>
0037
/*
 * struct vmwgfx_gmrid_man - id-based resource manager for GMR and MOB memory.
 *
 * Embeds the TTM resource manager so that to_gmrid_manager() can recover
 * this struct via container_of().
 */
struct vmwgfx_gmrid_man {
	struct ttm_resource_manager manager;	/* base class; must stay first-class embedded */
	spinlock_t lock;		/* protects used_gmr_pages and max_gmr_pages */
	struct ida gmr_ida;		/* allocator for GMR/MOB ids; has its own locking */
	uint32_t max_gmr_ids;		/* upper bound on ids handed out (device limit) */
	uint32_t max_gmr_pages;		/* page quota; 0 means unlimited, may grow at runtime */
	uint32_t used_gmr_pages;	/* pages currently charged against the quota */
	uint8_t type;			/* VMW_PL_GMR or VMW_PL_MOB (see vmw_gmrid_man_init) */
};
0047
/* Downcast from the embedded TTM manager to the containing gmrid manager. */
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmwgfx_gmrid_man, manager);
}
0052
0053 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
0054 struct ttm_buffer_object *bo,
0055 const struct ttm_place *place,
0056 struct ttm_resource **res)
0057 {
0058 struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
0059 int id;
0060
0061 *res = kmalloc(sizeof(**res), GFP_KERNEL);
0062 if (!*res)
0063 return -ENOMEM;
0064
0065 ttm_resource_init(bo, place, *res);
0066
0067 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
0068 if (id < 0)
0069 return id;
0070
0071 spin_lock(&gman->lock);
0072
0073 if (gman->max_gmr_pages > 0) {
0074 gman->used_gmr_pages += (*res)->num_pages;
0075
0076
0077
0078
0079
0080
0081
0082 if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) {
0083 const unsigned long max_graphics_pages = totalram_pages() / 2;
0084 uint32_t new_max_pages = 0;
0085
0086 DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
0087 vmw_host_printf("vmwgfx, warning: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
0088
0089 if (gman->max_gmr_pages > (max_graphics_pages / 2)) {
0090 DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n");
0091 new_max_pages = max_graphics_pages;
0092 } else
0093 new_max_pages = gman->max_gmr_pages * 2;
0094 if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
0095 DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
0096 ((new_max_pages) << (PAGE_SHIFT - 10)));
0097
0098 gman->max_gmr_pages = new_max_pages;
0099 } else {
0100 char buf[256];
0101 snprintf(buf, sizeof(buf),
0102 "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
0103 ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
0104 vmw_host_printf(buf);
0105 DRM_WARN("%s", buf);
0106 goto nospace;
0107 }
0108 }
0109 }
0110
0111 (*res)->start = id;
0112
0113 spin_unlock(&gman->lock);
0114 return 0;
0115
0116 nospace:
0117 gman->used_gmr_pages -= (*res)->num_pages;
0118 spin_unlock(&gman->lock);
0119 ida_free(&gman->gmr_ida, id);
0120 ttm_resource_fini(man, *res);
0121 kfree(*res);
0122 return -ENOSPC;
0123 }
0124
/**
 * vmw_gmrid_man_put_node - release a GMR/MOB id and its page accounting.
 * @man: The resource manager the resource was allocated from.
 * @res: The resource to free; res->start holds the id to return to the ida.
 *
 * Inverse of vmw_gmrid_man_get_node(): returns the id, uncharges the pages
 * under the manager lock, then finalizes and frees the resource.
 */
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	/* ida has its own internal locking; no need to hold gman->lock here. */
	ida_free(&gman->gmr_ida, res->start);
	spin_lock(&gman->lock);
	gman->used_gmr_pages -= res->num_pages;
	spin_unlock(&gman->lock);
	ttm_resource_fini(man, res);
	kfree(res);
}
0137
0138 static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
0139 struct drm_printer *printer)
0140 {
0141 struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
0142
0143 BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);
0144
0145 drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
0146 (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
0147 gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
0148 }
0149
0150 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
0151
0152 int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
0153 {
0154 struct ttm_resource_manager *man;
0155 struct vmwgfx_gmrid_man *gman =
0156 kzalloc(sizeof(*gman), GFP_KERNEL);
0157
0158 if (unlikely(!gman))
0159 return -ENOMEM;
0160
0161 man = &gman->manager;
0162
0163 man->func = &vmw_gmrid_manager_func;
0164 man->use_tt = true;
0165 ttm_resource_manager_init(man, &dev_priv->bdev, 0);
0166 spin_lock_init(&gman->lock);
0167 gman->used_gmr_pages = 0;
0168 ida_init(&gman->gmr_ida);
0169 gman->type = type;
0170
0171 switch (type) {
0172 case VMW_PL_GMR:
0173 gman->max_gmr_ids = dev_priv->max_gmr_ids;
0174 gman->max_gmr_pages = dev_priv->max_gmr_pages;
0175 break;
0176 case VMW_PL_MOB:
0177 gman->max_gmr_ids = VMWGFX_NUM_MOB;
0178 gman->max_gmr_pages = dev_priv->max_mob_pages;
0179 break;
0180 default:
0181 BUG();
0182 }
0183 ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
0184 ttm_resource_manager_set_used(man, true);
0185 return 0;
0186 }
0187
/**
 * vmw_gmrid_man_fini - tear down and unregister a GMR/MOB id manager.
 * @dev_priv: Pointer to the device private structure.
 * @type: The manager slot (VMW_PL_GMR or VMW_PL_MOB) to destroy.
 *
 * Teardown order matters: mark the manager unused so no new allocations
 * arrive, evict all outstanding resources, let TTM clean up, unregister
 * the slot, and only then destroy the ida and free the manager.
 */
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ttm_resource_manager_set_used(man, false);

	ttm_resource_manager_evict_all(&dev_priv->bdev, man);

	ttm_resource_manager_cleanup(man);

	ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
	ida_destroy(&gman->gmr_ida);
	kfree(gman);

}
0204
/* TTM resource manager ops for the id-based GMR/MOB managers. */
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,
	.free = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};