/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
    int ret;

    if (!drm_mm_initialized(&i915->mm.stolen))
        return -ENODEV;

    /* WaSkipStolenMemoryFirstPage:bdw+ */
    if (GRAPHICS_VER(i915) >= 8 && start < 4096)
        start = 4096;

    mutex_lock(&i915->mm.stolen_lock);
    ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
                      size, alignment, 0,
                      start, end, DRM_MM_INSERT_BEST);
    mutex_unlock(&i915->mm.stolen_lock);

    return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
                struct drm_mm_node *node, u64 size,
                unsigned alignment)
{
    return i915_gem_stolen_insert_node_in_range(i915, node,
                            size, alignment,
                            I915_GEM_STOLEN_BIAS,
                            U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
                 struct drm_mm_node *node)
{
    mutex_lock(&i915->mm.stolen_lock);
    drm_mm_remove_node(node);
    mutex_unlock(&i915->mm.stolen_lock);
}
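
/*
 * Typical usage of the helpers above, as an illustrative sketch only
 * (error handling trimmed, the surrounding caller is hypothetical):
 *
 *    struct drm_mm_node node = {};
 *    int err;
 *
 *    err = i915_gem_stolen_insert_node(i915, &node, SZ_4K, SZ_4K);
 *    if (err)
 *        return err;
 *    ... use the [node.start, node.start + node.size) range of stolen ...
 *    i915_gem_stolen_remove_node(i915, &node);
 */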

static int i915_adjust_stolen(struct drm_i915_private *i915,
                  struct resource *dsm)
{
    struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
    struct intel_uncore *uncore = ggtt->vm.gt->uncore;
    struct resource *r;

    if (dsm->start == 0 || dsm->end <= dsm->start)
        return -EINVAL;

    /*
     * TODO: We have yet to encounter the case where the GTT wasn't at the
     * end of stolen. With that assumption we could simplify this.
     */

    /* Make sure we don't clobber the GTT if it's within stolen memory */
    if (GRAPHICS_VER(i915) <= 4 &&
        !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
        struct resource stolen[2] = {*dsm, *dsm};
        struct resource ggtt_res;
        resource_size_t ggtt_start;

        ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
        if (GRAPHICS_VER(i915) == 4)
            ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
                     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
        else
            ggtt_start &= PGTBL_ADDRESS_LO_MASK;

        ggtt_res =
            (struct resource) DEFINE_RES_MEM(ggtt_start,
                             ggtt_total_entries(ggtt) * 4);

        if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
            stolen[0].end = ggtt_res.start;
        if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
            stolen[1].start = ggtt_res.end;

        /* Pick the larger of the two chunks */
        if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
            *dsm = stolen[0];
        else
            *dsm = stolen[1];

        if (stolen[0].start != stolen[1].start ||
            stolen[0].end != stolen[1].end) {
            drm_dbg(&i915->drm,
                "GTT within stolen memory at %pR\n",
                &ggtt_res);
            drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
                dsm);
        }
    }

    /*
     * With stolen lmem, we don't need to check if the address range
     * overlaps with the non-stolen system memory range, since lmem is local
     * to the gpu.
     */
    if (HAS_LMEM(i915))
        return 0;

    /*
     * Verify that nothing else uses this physical address. Stolen
     * memory should be reserved by the BIOS and hidden from the
     * kernel. So if the region is already marked as busy, something
     * is seriously wrong.
     */
    r = devm_request_mem_region(i915->drm.dev, dsm->start,
                    resource_size(dsm),
                    "Graphics Stolen Memory");
    if (r == NULL) {
        /*
         * One more attempt but this time requesting region from
         * start + 1, as we have seen that this resolves the region
         * conflict with the PCI Bus.
         * This is a BIOS w/a: Some BIOS wrap stolen in the root
         * PCI bus, but have an off-by-one error. Hence retry the
         * reservation starting from 1 instead of 0.
         * There's also BIOS with off-by-one on the other end.
         */
        r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
                        resource_size(dsm) - 2,
                        "Graphics Stolen Memory");
        /*
         * GEN3 firmware likes to smash pci bridges into the stolen
         * range. Apparently this works.
         */
        if (!r && GRAPHICS_VER(i915) != 3) {
            drm_err(&i915->drm,
                "conflict detected with stolen region: %pR\n",
                dsm);

            return -EBUSY;
        }
    }

    return 0;
}
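
/*
 * A worked example of the GTT carve-out above, using made-up numbers:
 * say stolen spans 0x7f800000-0x7fffffff and a 256KiB GTT table sits at
 * the very top, from 0x7ffc0000. The first candidate chunk is trimmed to
 * end at the start of the GTT while the second collapses to nothing, so
 * the larger, lower chunk wins and dsm ends just below the GTT.
 */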

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
    if (!drm_mm_initialized(&i915->mm.stolen))
        return;

    drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
                    struct intel_uncore *uncore,
                    resource_size_t *base,
                    resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore,
                    IS_GM45(i915) ?
                    CTG_STOLEN_RESERVED :
                    ELK_STOLEN_RESERVED);
    resource_size_t stolen_top = i915->dsm.end + 1;

    drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
        IS_GM45(i915) ? "CTG" : "ELK", reg_val);

    if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
        return;

    /*
     * Whether ILK really reuses the ELK register for this is unclear.
     * Let's see if we catch anyone with this supposedly enabled on ILK.
     */
    drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
         "ILK stolen reserved found? 0x%08x\n",
         reg_val);

    if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
        return;

    *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
    drm_WARN_ON(&i915->drm,
            (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

    *size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
                     struct intel_uncore *uncore,
                     resource_size_t *base,
                     resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

    if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
        return;

    *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

    switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
    case GEN6_STOLEN_RESERVED_1M:
        *size = 1024 * 1024;
        break;
    case GEN6_STOLEN_RESERVED_512K:
        *size = 512 * 1024;
        break;
    case GEN6_STOLEN_RESERVED_256K:
        *size = 256 * 1024;
        break;
    case GEN6_STOLEN_RESERVED_128K:
        *size = 128 * 1024;
        break;
    default:
        *size = 1024 * 1024;
        MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
    }
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
                    struct intel_uncore *uncore,
                    resource_size_t *base,
                    resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
    resource_size_t stolen_top = i915->dsm.end + 1;

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

    if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
        return;

    switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
    default:
        MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
        fallthrough;
    case GEN7_STOLEN_RESERVED_1M:
        *size = 1024 * 1024;
        break;
    }

    /*
     * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
     * reserved location as (top - size).
     */
    *base = stolen_top - *size;
}
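
/*
 * For instance, with stolen ending at 0x80000000 (stolen_top) and the
 * 1M reservation decoded above, the reserved chunk is deduced to start
 * at 0x80000000 - 0x100000 = 0x7ff00000, which is what *base reports.
 */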

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
                     struct intel_uncore *uncore,
                     resource_size_t *base,
                     resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

    if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
        return;

    *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

    switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
    case GEN7_STOLEN_RESERVED_1M:
        *size = 1024 * 1024;
        break;
    case GEN7_STOLEN_RESERVED_256K:
        *size = 256 * 1024;
        break;
    default:
        *size = 1024 * 1024;
        MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
    }
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
                    struct intel_uncore *uncore,
                    resource_size_t *base,
                    resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

    if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
        return;

    *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

    switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
    case GEN8_STOLEN_RESERVED_1M:
        *size = 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_2M:
        *size = 2 * 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_4M:
        *size = 4 * 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_8M:
        *size = 8 * 1024 * 1024;
        break;
    default:
        *size = 8 * 1024 * 1024;
        MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
    }
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
                    struct intel_uncore *uncore,
                    resource_size_t *base,
                    resource_size_t *size)
{
    u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
    resource_size_t stolen_top = i915->dsm.end + 1;

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

    if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
        return;

    if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
        return;

    *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
    *size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
                    struct intel_uncore *uncore,
                    resource_size_t *base,
                    resource_size_t *size)
{
    u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

    drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

    *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

    switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
    case GEN8_STOLEN_RESERVED_1M:
        *size = 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_2M:
        *size = 2 * 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_4M:
        *size = 4 * 1024 * 1024;
        break;
    case GEN8_STOLEN_RESERVED_8M:
        *size = 8 * 1024 * 1024;
        break;
    default:
        *size = 8 * 1024 * 1024;
        MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
    }
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
    struct drm_i915_private *i915 = mem->i915;
    struct intel_uncore *uncore = &i915->uncore;
    resource_size_t reserved_base, stolen_top;
    resource_size_t reserved_total, reserved_size;

    mutex_init(&i915->mm.stolen_lock);

    if (intel_vgpu_active(i915)) {
        drm_notice(&i915->drm,
               "%s, disabling use of stolen memory\n",
               "iGVT-g active");
        return 0;
    }

    if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
        drm_notice(&i915->drm,
               "%s, disabling use of stolen memory\n",
               "DMAR active");
        return 0;
    }

    if (resource_size(&mem->region) == 0)
        return 0;

    i915->dsm = mem->region;

    if (i915_adjust_stolen(i915, &i915->dsm))
        return 0;

    GEM_BUG_ON(i915->dsm.start == 0);
    GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

    stolen_top = i915->dsm.end + 1;
    reserved_base = stolen_top;
    reserved_size = 0;

    switch (GRAPHICS_VER(i915)) {
    case 2:
    case 3:
        break;
    case 4:
        if (!IS_G4X(i915))
            break;
        fallthrough;
    case 5:
        g4x_get_stolen_reserved(i915, uncore,
                    &reserved_base, &reserved_size);
        break;
    case 6:
        gen6_get_stolen_reserved(i915, uncore,
                     &reserved_base, &reserved_size);
        break;
    case 7:
        if (IS_VALLEYVIEW(i915))
            vlv_get_stolen_reserved(i915, uncore,
                        &reserved_base, &reserved_size);
        else
            gen7_get_stolen_reserved(i915, uncore,
                         &reserved_base, &reserved_size);
        break;
    case 8:
    case 9:
        if (IS_LP(i915))
            chv_get_stolen_reserved(i915, uncore,
                        &reserved_base, &reserved_size);
        else
            bdw_get_stolen_reserved(i915, uncore,
                        &reserved_base, &reserved_size);
        break;
    default:
        MISSING_CASE(GRAPHICS_VER(i915));
        fallthrough;
    case 11:
    case 12:
        icl_get_stolen_reserved(i915, uncore,
                    &reserved_base,
                    &reserved_size);
        break;
    }

    /*
     * Our expectation is that the reserved space is at the top of the
     * stolen region and *never* at the bottom. If we see !reserved_base,
     * it likely means we failed to read the registers correctly.
     */
    if (!reserved_base) {
        drm_err(&i915->drm,
            "inconsistent reservation %pa + %pa; ignoring\n",
            &reserved_base, &reserved_size);
        reserved_base = stolen_top;
        reserved_size = 0;
    }

    i915->dsm_reserved =
        (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

    if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
        drm_err(&i915->drm,
            "Stolen reserved area %pR outside stolen memory %pR\n",
            &i915->dsm_reserved, &i915->dsm);
        return 0;
    }

    /* Exclude the reserved region from driver use */
    mem->region.end = reserved_base - 1;
    mem->io_size = min(mem->io_size, resource_size(&mem->region));

    /*
     * It is possible for the reserved area to end before the end of stolen
     * memory, so just consider the start.
     */
    reserved_total = stolen_top - reserved_base;

    i915->stolen_usable_size =
        resource_size(&i915->dsm) - reserved_total;

    drm_dbg(&i915->drm,
        "Memory reserved for graphics device: %lluK, usable: %lluK\n",
        (u64)resource_size(&i915->dsm) >> 10,
        (u64)i915->stolen_usable_size >> 10);

    if (i915->stolen_usable_size == 0)
        return 0;

    /* Basic memrange allocator for stolen space. */
    drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

    return 0;
}
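
/*
 * To illustrate the resulting layout, with example numbers only: for a
 * 256M stolen region at 0x80000000 with an 8M reserved chunk at the top,
 * stolen_top = 0x90000000, reserved_base = 0x8f800000, and the drm_mm
 * allocator hands out offsets in [0, 248M) of usable stolen space.
 */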

/*
 * Scribble a recognisable pattern over freshly allocated or freed stolen
 * pages via the GGTT error-capture PTE, so that stale reads stand out in
 * debug (CONFIG_DRM_I915_DEBUG_GEM) builds.
 */
static void dbg_poison(struct i915_ggtt *ggtt,
               dma_addr_t addr, resource_size_t size,
               u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
    if (!drm_mm_node_allocated(&ggtt->error_capture))
        return;

    if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
        return; /* beware stop_machine() inversion */

    GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

    mutex_lock(&ggtt->error_mutex);
    while (size) {
        void __iomem *s;

        ggtt->vm.insert_page(&ggtt->vm, addr,
                     ggtt->error_capture.start,
                     I915_CACHE_NONE, 0);
        mb();

        s = io_mapping_map_wc(&ggtt->iomap,
                      ggtt->error_capture.start,
                      PAGE_SIZE);
        memset_io(s, x, PAGE_SIZE);
        io_mapping_unmap(s);

        addr += PAGE_SIZE;
        size -= PAGE_SIZE;
    }
    mb();
    ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
    mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                 resource_size_t offset, resource_size_t size)
{
    struct drm_i915_private *i915 = to_i915(dev);
    struct sg_table *st;
    struct scatterlist *sg;

    GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

    /*
     * We hide that we have no struct page backing our stolen object
     * by wrapping the contiguous physical allocation with a fake
     * dma mapping in a single scatterlist.
     */

    st = kmalloc(sizeof(*st), GFP_KERNEL);
    if (st == NULL)
        return ERR_PTR(-ENOMEM);

    if (sg_alloc_table(st, 1, GFP_KERNEL)) {
        kfree(st);
        return ERR_PTR(-ENOMEM);
    }

    sg = st->sgl;
    sg->offset = 0;
    sg->length = size;

    sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
    sg_dma_len(sg) = size;

    return st;
}
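
/*
 * For example (illustrative numbers): with dsm.start = 0x7f800000 and
 * offset = 0x1000, the single scatterlist entry reports a dma address of
 * 0x7f801000 and a length equal to the object size, even though no
 * struct pages back the range.
 */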

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    struct sg_table *pages =
        i915_pages_create_for_stolen(obj->base.dev,
                         obj->stolen->start,
                         obj->stolen->size);
    if (IS_ERR(pages))
        return PTR_ERR(pages);

    dbg_poison(to_gt(i915)->ggtt,
           sg_dma_address(pages->sgl),
           sg_dma_len(pages->sgl),
           POISON_INUSE);

    __i915_gem_object_set_pages(obj, pages, obj->stolen->size);

    return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                         struct sg_table *pages)
{
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    /* Should only be called from i915_gem_object_release_stolen() */

    dbg_poison(to_gt(i915)->ggtt,
           sg_dma_address(pages->sgl),
           sg_dma_len(pages->sgl),
           POISON_FREE);

    sg_free_table(pages);
    kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

    GEM_BUG_ON(!stolen);
    i915_gem_stolen_remove_node(i915, stolen);
    kfree(stolen);

    i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
    .name = "i915_gem_object_stolen",
    .get_pages = i915_gem_object_get_pages_stolen,
    .put_pages = i915_gem_object_put_pages_stolen,
    .release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
                       struct drm_i915_gem_object *obj,
                       struct drm_mm_node *stolen)
{
    static struct lock_class_key lock_class;
    unsigned int cache_level;
    unsigned int flags;
    int err;

    /*
     * Stolen objects are always physically contiguous since we just
     * allocate one big block underneath using the drm_mm range allocator.
     */
    flags = I915_BO_ALLOC_CONTIGUOUS;

    drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
    i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

    obj->stolen = stolen;
    obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
    cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
    i915_gem_object_set_cache_coherency(obj, cache_level);

    if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
        return -EBUSY;

    i915_gem_object_init_memory_region(obj, mem);

    err = i915_gem_object_pin_pages(obj);
    if (err)
        i915_gem_object_release_memory_region(obj);
    i915_gem_object_unlock(obj);

    return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
                    struct drm_i915_gem_object *obj,
                    resource_size_t offset,
                    resource_size_t size,
                    resource_size_t page_size,
                    unsigned int flags)
{
    struct drm_i915_private *i915 = mem->i915;
    struct drm_mm_node *stolen;
    int ret;

    if (!drm_mm_initialized(&i915->mm.stolen))
        return -ENODEV;

    if (size == 0)
        return -EINVAL;

    /*
     * With discrete devices, where we lack a mappable aperture, there is
     * no possible way to ever access this memory on the CPU side.
     */
    if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
        !(flags & I915_BO_ALLOC_GPU_ONLY))
        return -ENOSPC;

    stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
    if (!stolen)
        return -ENOMEM;

    if (offset != I915_BO_INVALID_OFFSET) {
        drm_dbg(&i915->drm,
            "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
            &offset, &size);

        stolen->start = offset;
        stolen->size = size;
        mutex_lock(&i915->mm.stolen_lock);
        ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
        mutex_unlock(&i915->mm.stolen_lock);
    } else {
        ret = i915_gem_stolen_insert_node(i915, stolen, size,
                          mem->min_page_size);
    }
    if (ret)
        goto err_free;

    ret = __i915_gem_object_create_stolen(mem, obj, stolen);
    if (ret)
        goto err_remove;

    return 0;

err_remove:
    i915_gem_stolen_remove_node(i915, stolen);
err_free:
    kfree(stolen);
    return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
                  resource_size_t size)
{
    return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
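
/*
 * Sketch of a typical caller of the constructor above, with hypothetical
 * surrounding code:
 *
 *    struct drm_i915_gem_object *obj;
 *
 *    obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *    if (IS_ERR(obj))
 *        return PTR_ERR(obj);
 *    ... pin/map the object, e.g. for fbdev or ring buffers ...
 *    i915_gem_object_put(obj);
 */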

static int init_stolen_smem(struct intel_memory_region *mem)
{
    /*
     * Initialise stolen early so that we may reserve preallocated
     * objects for the BIOS to KMS transition.
     */
    return i915_gem_init_stolen(mem);
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
    i915_gem_cleanup_stolen(mem->i915);
    return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
    .init = init_stolen_smem,
    .release = release_stolen_smem,
    .init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
    int err;

    if (GEM_WARN_ON(resource_size(&mem->region) == 0))
        return -ENODEV;

    /*
     * TODO: For stolen lmem we mostly just care about populating the dsm
     * related bits and setting up the drm_mm allocator for the range.
     * Perhaps split up i915_gem_init_stolen() for this.
     */
    err = i915_gem_init_stolen(mem);
    if (err)
        return err;

    if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
                        mem->io_start,
                        mem->io_size)) {
        err = -EIO;
        goto err_cleanup;
    }

    return 0;

err_cleanup:
    i915_gem_cleanup_stolen(mem->i915);
    return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
    if (mem->io_size)
        io_mapping_fini(&mem->iomap);
    i915_gem_cleanup_stolen(mem->i915);
    return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
    .init = init_stolen_lmem,
    .release = release_stolen_lmem,
    .init_object = _i915_gem_object_stolen_init,
};

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
               u16 instance)
{
    struct intel_uncore *uncore = &i915->uncore;
    struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
    resource_size_t dsm_size, dsm_base, lmem_size;
    struct intel_memory_region *mem;
    resource_size_t io_start, io_size;
    resource_size_t min_page_size;

    if (WARN_ON_ONCE(instance))
        return ERR_PTR(-ENODEV);

    /* Use DSM base address instead for stolen memory */
    dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
    if (IS_DG1(uncore->i915)) {
        lmem_size = pci_resource_len(pdev, 2);
        if (WARN_ON(lmem_size < dsm_base))
            return ERR_PTR(-ENODEV);
    } else {
        resource_size_t lmem_range;

        lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
        lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
        lmem_size *= SZ_1G;
    }
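
    /*
     * Illustrative decode of the non-DG1 path above: the low 16 bits of
     * XEHP_TILE0_ADDR_RANGE hold the tile range field, which counts lmem
     * in 1GiB units once shifted. E.g. with the shift at 8, a raw field
     * of 0x0400 decodes to 4, i.e. lmem_size = 4 * SZ_1G.
     */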

    dsm_size = lmem_size - dsm_base;
    if (pci_resource_len(pdev, 2) < lmem_size) {
        io_start = 0;
        io_size = 0;
    } else {
        io_start = pci_resource_start(pdev, 2) + dsm_base;
        io_size = dsm_size;
    }

    min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
                        I915_GTT_PAGE_SIZE_4K;

    mem = intel_memory_region_create(i915, dsm_base, dsm_size,
                     min_page_size,
                     io_start, io_size,
                     type, instance,
                     &i915_region_stolen_lmem_ops);
    if (IS_ERR(mem))
        return mem;

    /*
     * TODO: consider creating common helper to just print all the
     * interesting stuff from intel_memory_region, which we can use for all
     * our probed regions.
     */

    drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
        &mem->io_start);
    drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base);

    intel_memory_region_set_name(mem, "stolen-local");

    mem->private = true;

    return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
               u16 instance)
{
    struct intel_memory_region *mem;

    mem = intel_memory_region_create(i915,
                     intel_graphics_stolen_res.start,
                     resource_size(&intel_graphics_stolen_res),
                     PAGE_SIZE, 0, 0, type, instance,
                     &i915_region_stolen_smem_ops);
    if (IS_ERR(mem))
        return mem;

    intel_memory_region_set_name(mem, "stolen-system");

    mem->private = true;
    return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
    return obj->ops == &i915_gem_object_stolen_ops;
}