// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/highmem.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusions: "fences" in the i915 driver are not
 * execution fences used to track command completion but hardware detiler
 * objects which wrap a given range of the global GTT. Each platform has
 * only a fairly limited set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to
 * the hardware frontbuffer render tracking and hence interact with
 * frontbuffer compression. Furthermore on older platforms fences are
 * required for tiled objects used by the display engine. They can also be
 * used by the render engine - they're required for blitter commands and
 * are optional for render commands. But on gen4+ both display (with the
 * exception of fbc) and rendering have their own tiling state bits and
 * don't need fences.
 *
 * Also note that fences only support 2D layouts of tiling in the
 * scanned-out fb layout and hence can't be used for the fancier new tiling
 * formats like W, Ys and Yf.
 *
 * Finally note that because fences are a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is
 * committed to the hardware lazily to avoid unnecessary stalls on gen2/3.
 */

#define pipelined 0

static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
{
	return fence->ggtt->vm.i915;
}

static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
{
	return fence->ggtt->vm.gt->uncore;
}

static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = fence->start + fence->size - I965_FENCE_PAGE;
		val <<= 32;
		val |= fence->start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);

		/*
		 * To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In
		 * order for a partial fence not to be evaluated between
		 * writes, we precede the update with write to turn off the
		 * fence register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);

		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
	}
}
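
/*
 * Worked example (illustrative values only, not taken from the driver):
 * a Y-tiled gen6 object at GGTT offset 0x00100000 with fence_size
 * 0x00040000 and a 512 byte stride would be encoded as
 *
 *	val  = (u64)(0x00100000 + 0x00040000 - I965_FENCE_PAGE) << 32;
 *	val |= 0x00100000;
 *	val |= (u64)(512 / 128 - 1) << GEN6_FENCE_PITCH_SHIFT;
 *	val |= BIT(I965_FENCE_TILING_Y_SHIFT) | I965_FENCE_REG_VALID;
 *
 * i.e. the upper dword holds the last page of the fenced range and the
 * lower dword the first page plus the pitch, tiling and valid bits.
 */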

static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
	u32 val;

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;
		unsigned int tiling = fence->tiling;
		bool is_y_tiled = tiling == I915_TILING_Y;

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = fence->start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}
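
/*
 * Worked example (illustrative only): on gen3 the pitch field stores
 * log2 of the stride in tile-width units, so an X-tiled surface with a
 * 2048 byte stride encodes ilog2(2048 / 512) == 2, while a Y-tiled
 * surface on hardware with 128 byte wide Y tiles and the same stride
 * encodes ilog2(2048 / 128) == 4.
 */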

static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
	u32 val;

	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		val = fence->start;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}
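
/*
 * Worked example (illustrative only): gen2 always measures the pitch in
 * 128 byte units, so a Y-tiled object at GGTT offset 0x00080000 with a
 * 1024 byte stride encodes as
 *
 *	val = 0x00080000 |
 *	      BIT(I830_FENCE_TILING_Y_SHIFT) |
 *	      I830_FENCE_SIZE_BITS(fence->size) |
 *	      (ilog2(1024 / 128) << I830_FENCE_PITCH_SHIFT) |
 *	      I830_FENCE_REG_VALID;
 *
 * with ilog2(8) == 3 in the pitch field.
 */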

static void fence_write(struct i915_fence_reg *fence)
{
	struct drm_i915_private *i915 = fence_to_i915(fence);

	/*
	 * Route the update to the generation specific encoding. The caller
	 * must keep the device awake while the register is (re)written;
	 * see fence_update() and intel_ggtt_restore_fences() for how the
	 * write is elided or replayed when the device is powered down.
	 */
	if (GRAPHICS_VER(i915) == 2)
		i830_write_fence_reg(fence);
	else if (GRAPHICS_VER(i915) == 3)
		i915_write_fence_reg(fence);
	else
		i965_write_fence_reg(fence);

	/*
	 * Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */
}

static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
{
	return GRAPHICS_VER(fence_to_i915(fence)) < 4;
}

static int fence_update(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = fence->ggtt;
	struct intel_uncore *uncore = fence_to_uncore(fence);
	intel_wakeref_t wakeref;
	struct i915_vma *old;
	int ret;

	fence->tiling = 0;
	if (vma) {
		GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
			   !i915_gem_object_get_tiling(vma->obj));

		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (gpu_uses_fence_registers(fence)) {
			/* implicit 'unfenced' GPU blits */
			ret = i915_vma_sync(vma);
			if (ret)
				return ret;
		}

		fence->start = vma->node.start;
		fence->size = vma->fence_size;
		fence->stride = i915_gem_object_get_stride(vma->obj);
		fence->tiling = i915_gem_object_get_tiling(vma->obj);
	}
	WRITE_ONCE(fence->dirty, false);

	old = xchg(&fence->vma, NULL);
	if (old) {
		/* Wait for all users of the old fence to finish first. */
		ret = i915_active_wait(&fence->active);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &ggtt->fence_list);
	}

	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see intel_ggtt_restore_fences().
	 *
	 * This only works for removing the fence register, on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
	if (!wakeref) {
		GEM_BUG_ON(vma);
		return 0;
	}

	WRITE_ONCE(fence->vma, vma);
	fence_write(fence);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &ggtt->fence_list);
	}

	intel_runtime_pm_put(uncore->rpm, wakeref);
	return 0;
}

/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is
 * useful if the kernel wants to do untiled GTT access.
 */
void i915_vma_revoke_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence = vma->fence;
	intel_wakeref_t wakeref;

	lockdep_assert_held(&vma->vm->mutex);
	if (!fence)
		return;

	GEM_BUG_ON(fence->vma != vma);
	GEM_BUG_ON(!i915_active_is_idle(&fence->active));
	GEM_BUG_ON(atomic_read(&fence->pin_count));

	fence->tiling = 0;
	WRITE_ONCE(fence->vma, NULL);
	vma->fence = NULL;

	/*
	 * Skip the write to HW if and only if the device is currently
	 * suspended.
	 *
	 * If the driver does not currently hold a wakeref (if_in_use == 0),
	 * the device may currently be runtime suspended, or it may be woken
	 * up before the suspend takes place. If the device is not suspended
	 * (powered down) and we skip clearing the fence register, the HW is
	 * left in an undefined state where we may end up with multiple
	 * registers overlapping.
	 */
	with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
		fence_write(fence);
}

static bool fence_is_active(const struct i915_fence_reg *fence)
{
	return fence->vma && i915_vma_is_active(fence->vma);
}

static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
	struct i915_fence_reg *active = NULL;
	struct i915_fence_reg *fence, *fn;

	list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence == active) /* now seen this fence twice */
			active = ERR_PTR(-EAGAIN);

		/* Prefer idle fences so we do not have to wait on the GPU */
		if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
			if (!active)
				active = fence;

			list_move_tail(&fence->link, &ggtt->fence_list);
			continue;
		}

		if (atomic_read(&fence->pin_count))
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(ggtt->vm.i915))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOBUFS);
}

int __i915_vma_pin_fence(struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	lockdep_assert_held(&vma->vm->mutex);

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		atomic_inc(&fence->pin_count);
		if (!fence->dirty) {
			list_move_tail(&fence->link, &ggtt->fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(ggtt);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(atomic_read(&fence->pin_count));
		atomic_inc(&fence->pin_count);
	} else {
		return 0;
	}

	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	atomic_dec(&fence->pin_count);
	return err;
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
	int err;

	if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
		return 0;

	/*
	 * Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		return err;

	err = __i915_vma_pin_fence(vma);
	mutex_unlock(&vma->vm->mutex);

	return err;
}
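
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed);
 * note the wakeref requirement described above, and that
 * i915_vma_unpin_fence() tolerates a vma that received no fence:
 *
 *	wakeref = intel_runtime_pm_get(vma->vm->gt->uncore->rpm);
 *	err = i915_vma_pin_fence(vma);
 *	if (!err) {
 *		if (vma->fence)
 *			... detiled access through the GGTT aperture ...
 *		i915_vma_unpin_fence(vma);
 *	}
 *	intel_runtime_pm_put(vma->vm->gt->uncore->rpm, wakeref);
 */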

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @ggtt: Global GTT
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
	struct i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&ggtt->vm.mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &ggtt->fence_list, link)
		count += !atomic_read(&fence->pin_count);
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(ggtt);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);

	return fence;
}
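
/*
 * A minimal sketch of the reserve/unreserve pairing (hypothetical
 * caller): both functions expect ggtt->vm.mutex to be held, and the
 * reserved register stays excluded from normal fence allocation until
 * it is returned with i915_unreserve_fence():
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	fence = i915_reserve_fence(ggtt);
 *	mutex_unlock(&ggtt->vm.mutex);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	... use the dedicated fence register ...
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	i915_unreserve_fence(fence);
 *	mutex_unlock(&ggtt->vm.mutex);
 */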

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a reserved fence register from vGPU back to the
 * fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
	struct i915_ggtt *ggtt = fence->ggtt;

	lockdep_assert_held(&ggtt->vm.mutex);

	list_add(&fence->link, &ggtt->fence_list);
}

/**
 * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be
 * called after a gpu reset and on resume. Note that on runtime suspend we
 * only cancel the fences, to be reacquired by the user later.
 */
void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
	int i;

	for (i = 0; i < ggtt->num_fences; i++)
		fence_write(&ggtt->fence_regs[i]);
}
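
/*
 * For example (the call site below is an assumption for illustration,
 * not taken from this file), a reset or resume path replays the
 * software tracking into the hardware with:
 *
 *	intel_ggtt_restore_fences(gt->ggtt);
 */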

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Tiled surfaces are laid out in fixed-size tiles, and both the CPU
 * (through the GTT aperture) and the GPU must agree on that layout.
 *
 * On top of the tile layout, some memory controllers interleave accesses
 * between their two channels by XORing higher physical address bits into
 * bit 6. When that happens, the GPU has to apply the same "swizzle" while
 * walking a tiled surface, otherwise its view of the data diverges from
 * the CPU's. Which address bits participate depends on the memory
 * configuration, so detect_bit_6_swizzle() below inspects the memory
 * controller registers to work out the swizzle mode for X and Y tiling.
 *
 * On 915/945 class hardware the swizzle can additionally depend on
 * physical address bit 17. Bit 17 is not stable across swap-out: a page
 * may return at a different physical address, so for those platforms the
 * driver records bit 17 of every page before the backing storage is
 * unpinned (i915_gem_object_save_bit_17_swizzle()) and swizzles the page
 * contents back into place when it is pinned again
 * (i915_gem_object_do_bit_17_swizzle()).
 */
static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (GRAPHICS_VER(i915) >= 6) {
		if (i915->preserve_bios_swizzle) {
			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;

			/*
			 * Enable swizzling only when the channels are
			 * populated with identically sized dimms. We can
			 * only safely swizzle when the channels are
			 * symmetric; otherwise the swizzle would depend on
			 * which channel a page happens to land in.
			 */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * On Ironlake, the GPU uses the same swizzling setup
		 * regardless of the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (GRAPHICS_VER(i915) == 2) {
		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
		/*
		 * The 965, G33, and newer have a very flexible memory
		 * configuration. The controller will enable dual-channel
		 * mode (interleaving) on as much memory as it can, and the
		 * GPU will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * There is no documented way to read the swizzle state
		 * directly, so approximate it: when both channels report
		 * the same DRAM rank boundary value the channels are
		 * populated symmetrically and interleaved, in which case
		 * the GPU swizzles bit 6 for tiled objects. Reports
		 * indicate the swizzling can actually vary with page
		 * placement inside the channels, i.e. pages are swizzled
		 * where banks of memory are paired and unswizzled where
		 * they are not.
		 */
		if (intel_uncore_read16(uncore, C0DRB3_BW) ==
		    intel_uncore_read16(uncore, C1DRB3_BW)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc = intel_uncore_read(uncore, DCC);

		/*
		 * On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/*
				 * This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* Check for L-shaped memory aka modified enhanced addressing. */
		if (GRAPHICS_VER(i915) == 4 &&
		    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			drm_err(&i915->drm,
				"Couldn't read from MCHBAR. Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	}
0717
0718 if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
0719 swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730 i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
0731 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
0732 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
0733 }
0734
0735 to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
0736 to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
0737 }
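
/*
 * Illustrative sketch (not part of the driver): a CPU-side detiling
 * routine would mirror I915_BIT_6_SWIZZLE_9_10 by XORing bits 9 and 10
 * of an offset into bit 6 before touching the backing storage:
 *
 *	static u32 swizzle_bit6_9_10(u32 offset)
 *	{
 *		if (((offset >> 9) ^ (offset >> 10)) & 1)
 *			offset ^= BIT(6);
 *		return offset;
 *	}
 */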

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}
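
/*
 * Rationale: with a bit 17 dependent swizzle, flipping bit 17 of the
 * physical address flips bit 6 of every swizzled address inside the
 * page (gpu_addr = cpu_addr ^ BIT(6)), so the two 64 byte halves of
 * each 128 byte span trade places; swizzle_page() pre-swaps the data
 * so that the GPU's tiled view is unchanged.
 */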

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			swizzle_page(page);
			set_page_dirty(page);
		}

		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This
 * must be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 record\n");
			return;
		}
	}

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}
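
/*
 * A minimal pairing sketch (hypothetical call sites in the backing
 * storage code): the bit 17 state is saved while the pages are still
 * pinned, and the contents are fixed up once they are pinned again:
 *
 *	i915_gem_object_save_bit_17_swizzle(obj, pages);
 *	... pages released; they may return at different physical
 *	    addresses with bit 17 flipped ...
 *	i915_gem_object_do_bit_17_swizzle(obj, pages);
 */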

void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	int num_fences;
	int i;

	INIT_LIST_HEAD(&ggtt->fence_list);
	INIT_LIST_HEAD(&ggtt->userfault_list);
	intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);

	detect_bit_6_swizzle(ggtt);

	if (!i915_ggtt_has_aperture(ggtt))
		num_fences = 0;
	else if (GRAPHICS_VER(i915) >= 7 &&
		 !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
		num_fences = 32;
	else if (GRAPHICS_VER(i915) >= 4 ||
		 IS_I945G(i915) || IS_I945GM(i915) ||
		 IS_G33(i915) || IS_PINEVIEW(i915))
		num_fences = 16;
	else
		num_fences = 8;

	if (intel_vgpu_active(i915))
		num_fences = intel_uncore_read(uncore,
					       vgtif_reg(avail_rs.fence_num));
	ggtt->fence_regs = kcalloc(num_fences,
				   sizeof(*ggtt->fence_regs),
				   GFP_KERNEL);
	if (!ggtt->fence_regs)
		num_fences = 0;

	/* Initialize fence registers to zero */
	for (i = 0; i < num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		i915_active_init(&fence->active, NULL, NULL, 0);
		fence->ggtt = ggtt;
		fence->id = i;
		list_add_tail(&fence->link, &ggtt->fence_list);
	}
	ggtt->num_fences = num_fences;

	intel_ggtt_restore_fences(ggtt);
}

void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
{
	int i;

	for (i = 0; i < ggtt->num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		i915_active_fini(&fence->active);
	}

	kfree(ggtt->fence_regs);
}

void intel_gt_init_swizzling(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	if (GRAPHICS_VER(i915) < 5 ||
	    to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);

	if (GRAPHICS_VER(i915) == 5)
		return;

	intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);

	if (GRAPHICS_VER(i915) == 6)
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (GRAPHICS_VER(i915) == 7)
		intel_uncore_write(uncore,
				   ARB_MODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (GRAPHICS_VER(i915) == 8)
		intel_uncore_write(uncore,
				   GAMTARBMODE,
				   _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		MISSING_CASE(GRAPHICS_VER(i915));
}