Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * SPDX-License-Identifier: MIT
0003  *
0004  * Copyright © 2016 Intel Corporation
0005  */
0006 
0007 #include "i915_scatterlist.h"
0008 #include "i915_ttm_buddy_manager.h"
0009 
0010 #include <drm/drm_buddy.h>
0011 #include <drm/drm_mm.h>
0012 
0013 #include <linux/slab.h>
0014 
0015 bool i915_sg_trim(struct sg_table *orig_st)
0016 {
0017     struct sg_table new_st;
0018     struct scatterlist *sg, *new_sg;
0019     unsigned int i;
0020 
0021     if (orig_st->nents == orig_st->orig_nents)
0022         return false;
0023 
0024     if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
0025         return false;
0026 
0027     new_sg = new_st.sgl;
0028     for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
0029         sg_set_page(new_sg, sg_page(sg), sg->length, 0);
0030         sg_dma_address(new_sg) = sg_dma_address(sg);
0031         sg_dma_len(new_sg) = sg_dma_len(sg);
0032 
0033         new_sg = sg_next(new_sg);
0034     }
0035     GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
0036 
0037     sg_free_table(orig_st);
0038 
0039     *orig_st = new_st;
0040     return true;
0041 }
0042 
0043 static void i915_refct_sgt_release(struct kref *ref)
0044 {
0045     struct i915_refct_sgt *rsgt =
0046         container_of(ref, typeof(*rsgt), kref);
0047 
0048     sg_free_table(&rsgt->table);
0049     kfree(rsgt);
0050 }
0051 
/* Default refcounted-sgt ops: release frees both the table and the rsgt. */
static const struct i915_refct_sgt_ops rsgt_ops = {
    .release = i915_refct_sgt_release
};
0055 
/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
    __i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}
0065 
0066 /**
0067  * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
0068  * drm_mm_node
0069  * @node: The drm_mm_node.
0070  * @region_start: An offset to add to the dma addresses of the sg list.
0071  * @page_alignment: Required page alignment for each sg entry. Power of two.
0072  *
0073  * Create a struct sg_table, initializing it from a struct drm_mm_node,
0074  * taking a maximum segment length into account, splitting into segments
0075  * if necessary.
0076  *
0077  * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
0078  * error code cast to an error pointer on failure.
0079  */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
                          u64 region_start,
                          u32 page_alignment)
{
    /* Largest sg entry length that still honours @page_alignment. */
    const u32 max_segment = round_down(UINT_MAX, page_alignment);
    const u32 segment_pages = max_segment >> PAGE_SHIFT;
    u64 block_size, offset, prev_end;
    struct i915_refct_sgt *rsgt;
    struct sg_table *st;
    struct scatterlist *sg;

    GEM_BUG_ON(!max_segment);

    rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
    if (!rsgt)
        return ERR_PTR(-ENOMEM);

    i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
    st = &rsgt->table;
    /*
     * Worst-case entry count: one sg entry per max_segment worth of
     * pages. Unused entries are trimmed off at the end.
     */
    if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
               GFP_KERNEL)) {
        i915_refct_sgt_put(rsgt);
        return ERR_PTR(-ENOMEM);
    }

    sg = st->sgl;
    st->nents = 0;
    /* Sentinel that can never match a real offset: forces a first entry. */
    prev_end = (resource_size_t)-1;
    block_size = node->size << PAGE_SHIFT;
    offset = node->start << PAGE_SHIFT;

    while (block_size) {
        u64 len;

        /* Open a new sg entry on a discontinuity or when the current is full. */
        if (offset != prev_end || sg->length >= max_segment) {
            if (st->nents)
                sg = __sg_next(sg);

            sg_dma_address(sg) = region_start + offset;
            GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
                           page_alignment));
            sg_dma_len(sg) = 0;
            sg->length = 0;
            st->nents++;
        }

        /* Extend the current entry as far as the segment cap allows. */
        len = min_t(u64, block_size, max_segment - sg->length);
        sg->length += len;
        sg_dma_len(sg) += len;

        offset += len;
        block_size -= len;

        prev_end = offset;
    }

    sg_mark_end(sg);
    /* Drop the unused tail of the worst-case allocation. */
    i915_sg_trim(st);

    return rsgt;
}
0141 
0142 /**
0143  * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
0144  * i915_buddy_block list
0145  * @res: The struct i915_ttm_buddy_resource.
0146  * @region_start: An offset to add to the dma addresses of the sg list.
0147  * @page_alignment: Required page alignment for each sg entry. Power of two.
0148  *
0149  * Create a struct sg_table, initializing it from struct i915_buddy_block list,
0150  * taking a maximum segment length into account, splitting into segments
0151  * if necessary.
0152  *
0153  * Return: A pointer to a kmalloced struct i915_refct_sgts on success, negative
0154  * error code cast to an error pointer on failure.
0155  */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
                             u64 region_start,
                             u32 page_alignment)
{
    struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
    const u64 size = res->num_pages << PAGE_SHIFT;
    /* Largest sg entry length that still honours @page_alignment. */
    const u32 max_segment = round_down(UINT_MAX, page_alignment);
    struct drm_buddy *mm = bman_res->mm;
    struct list_head *blocks = &bman_res->blocks;
    struct drm_buddy_block *block;
    struct i915_refct_sgt *rsgt;
    struct scatterlist *sg;
    struct sg_table *st;
    resource_size_t prev_end;

    GEM_BUG_ON(list_empty(blocks));
    GEM_BUG_ON(!max_segment);

    rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
    if (!rsgt)
        return ERR_PTR(-ENOMEM);

    i915_refct_sgt_init(rsgt, size);
    st = &rsgt->table;
    /* Worst case: one sg entry per page; trimmed down at the end. */
    if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
        i915_refct_sgt_put(rsgt);
        return ERR_PTR(-ENOMEM);
    }

    sg = st->sgl;
    st->nents = 0;
    /* Sentinel that can never match a real offset: forces a first entry. */
    prev_end = (resource_size_t)-1;

    list_for_each_entry(block, blocks, link) {
        u64 block_size, offset;

        /* Clamp: the final block may extend past the resource size. */
        block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
        offset = drm_buddy_block_offset(block);

        while (block_size) {
            u64 len;

            /* Open a new sg entry on a discontinuity or when full. */
            if (offset != prev_end || sg->length >= max_segment) {
                if (st->nents)
                    sg = __sg_next(sg);

                sg_dma_address(sg) = region_start + offset;
                GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
                               page_alignment));
                sg_dma_len(sg) = 0;
                sg->length = 0;
                st->nents++;
            }

            /* Extend the current entry up to the segment cap. */
            len = min_t(u64, block_size, max_segment - sg->length);
            sg->length += len;
            sg_dma_len(sg) += len;

            offset += len;
            block_size -= len;

            prev_end = offset;
        }
    }

    sg_mark_end(sg);
    /* Drop the unused tail of the worst-case allocation. */
    i915_sg_trim(st);

    return rsgt;
}
0226 
0227 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
0228 #include "selftests/scatterlist.c"
0229 #endif