// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
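
/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */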
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};
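
/*
 * No dirtied pages at scan trigger a transition to the _MKWRITE method,
 * similarly a certain percentage of dirty pages trigger a transition to
 * the _PAGETABLE method. How many consecutive triggers should we wait for
 * before changing method?
 */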
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
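
/* Percentage of dirty pages at scan that triggers a transition to the _PAGETABLE method */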
#define VMW_DIRTY_PERCENTAGE 10
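
/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */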
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	unsigned long bitmap[];
};
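
/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits, clears those bits and records the
 * result in the dirty structure. This function may change the
 * dirty-tracking method.
 */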
static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	num_marked = clean_record_shared_mapping_range
		(mapping,
		 offset, dirty->bitmap_size,
		 offset, &dirty->bitmap[0],
		 &dirty->start, &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		wp_shared_mapping_range(mapping,
					offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}
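
/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */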
static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
					     dirty->start + offset,
					     dirty->end - dirty->start);

	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}
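
/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty-tracking method.
 */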
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}
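
/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range() operation
 * @vbo: The buffer object
 * @start: First page of the range within the buffer object
 * @end: Last page of the range within the buffer object + 1
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */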
static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}
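
/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object
 * @start: First page of the range within the buffer object
 * @end: Last page of the range within the buffer object + 1
 *
 * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
 */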
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	vmw_bo_dirty_pre_unmap(vbo, start, end);
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}
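
/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user to a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */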
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = vbo->base.resource->num_pages;
	size_t size;
	int ret;

	if (dirty) {
		dirty->ref_count++;
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	dirty->ref_count = 1;
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->base.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;
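
		/* Write-protect and then read back to pick up all PTEs */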
		wp_shared_mapping_range(mapping, offset, num_pages);
		clean_record_shared_mapping_range(mapping, offset, num_pages,
						  offset,
						  &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}

	vbo->dirty = dirty;

	return 0;

out_no_dirty:
	return ret;
}
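
/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, the dirty-tracking object is
 * freed and the pointer to it cleared.
 */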
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty && --dirty->ref_count == 0) {
		kvfree(dirty);
		vbo->dirty = NULL;
	}
}
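
/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing buffer object
 * @res: The resource
 *
 * This function picks up all dirty ranges affecting the resource from
 * its backing buffer object and calls vmw_resource_dirty_update() once
 * for each range. The transferred ranges are cleared from the buffer
 * object's dirty tracking.
 */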
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = max(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}
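
/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing buffer object
 * @res: The resource
 *
 * This function clears all dirty ranges affecting the resource from
 * the backing buffer object's dirty tracking.
 */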
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}
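
/*
 * vmw_bo_vm_mkwrite - page_mkwrite() handler for vmwgfx buffer-object
 * mappings. When the _MKWRITE dirty-tracking method is used, the written
 * page is recorded in the buffer object's dirty bitmap.
 */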
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_buffer_object *vbo =
		container_of(bo, typeof(*vbo), base);
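
	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly,
	 * so make sure the TTM helpers are aware of that by temporarily
	 * clearing FAULT_FLAG_ALLOW_RETRY around the reservation.
	 */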
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= bo->resource->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}
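
/*
 * vmw_bo_vm_fault - fault handler for vmwgfx buffer-object mappings.
 * Limits prefaulting based on resource cleanliness and, when the _MKWRITE
 * dirty-tracking method is used, maps pages write-protected so that the
 * first write triggers vmw_bo_vm_mkwrite().
 */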
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->resource->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		num_prefault = min(num_prefault, allowed_prefault);
	}
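
	/*
	 * If we don't track dirty using the MKWRITE method, make sure the
	 * page protection is write-enabled so we don't get a lot of
	 * unnecessary write faults.
	 */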
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}