/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RES_CURSOR_H__
#define __AMDGPU_RES_CURSOR_H__

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu_vram_mgr.h"

/**
 * struct amdgpu_res_cursor - state for walking over a resource
 *
 * @start: start of the current chunk in bytes
 * @size: size of the current chunk in bytes
 * @remaining: bytes left to walk in the whole range
 * @node: current VRAM buddy block or GTT mm node
 * @mem_type: memory type of the resource being walked
 */
struct amdgpu_res_cursor {
	uint64_t	start;
	uint64_t	size;
	uint64_t	remaining;
	void		*node;
	uint32_t	mem_type;
};
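
/*
 * A minimal usage sketch (illustrative, not part of the driver): walk every
 * contiguous chunk of a resource until the cursor is exhausted. handle_chunk()
 * is a hypothetical stand-in for whatever consumes each physical range.
 *
 *	struct amdgpu_res_cursor cursor;
 *
 *	amdgpu_res_first(res, 0, res->num_pages << PAGE_SHIFT, &cursor);
 *	while (cursor.remaining) {
 *		handle_chunk(cursor.start, cursor.size);
 *		amdgpu_res_next(&cursor, cursor.size);
 *	}
 */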

/**
 * amdgpu_res_first - initialize an amdgpu_res_cursor
 *
 * @res: TTM resource object to walk, may be NULL
 * @start: byte offset into the resource at which to start
 * @size: number of bytes to walk
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations covering @size bytes from byte
 * offset @start.
 */
static inline void amdgpu_res_first(struct ttm_resource *res,
				    uint64_t start, uint64_t size,
				    struct amdgpu_res_cursor *cur)
{
	struct drm_buddy_block *block;
	struct list_head *head, *next;
	struct drm_mm_node *node;

	if (!res)
		goto fallback;

	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		head = &to_amdgpu_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip over whole blocks until the one containing @start */
		while (start >= amdgpu_vram_mgr_block_size(block)) {
			start -= amdgpu_vram_mgr_block_size(block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = amdgpu_vram_mgr_block_start(block) + start;
		cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
		cur->remaining = size;
		cur->node = block;
		break;
	case TTM_PL_TT:
		/* Skip over whole mm nodes until the one containing @start */
		node = to_ttm_range_mgr_node(res)->mm_nodes;
		while (start >= node->size << PAGE_SHIFT)
			start -= node++->size << PAGE_SHIFT;

		cur->start = (node->start << PAGE_SHIFT) + start;
		cur->size = min((node->size << PAGE_SHIFT) - start, size);
		cur->remaining = size;
		cur->node = node;
		break;
	default:
		goto fallback;
	}

	return;

fallback:
	/* Treat the whole range as one linear chunk, e.g. for NULL resources */
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
	return;
}
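
/*
 * Sketch of a sub-range walk (assumptions: bo is a valid amdgpu_bo and
 * emit_copy() is a hypothetical transfer helper): only the @len bytes
 * starting at byte @offset are visited, split into contiguous pieces.
 *
 *	struct amdgpu_res_cursor src;
 *
 *	amdgpu_res_first(bo->tbo.resource, offset, len, &src);
 *	while (src.remaining) {
 *		emit_copy(src.start, src.size);
 *		amdgpu_res_next(&src, src.size);
 *	}
 */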

/**
 * amdgpu_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
	struct drm_buddy_block *block;
	struct drm_mm_node *node;
	struct list_head *next;

	BUG_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/* Still inside the current chunk, just move forward within it */
	cur->size -= size;
	if (cur->size) {
		cur->start += size;
		return;
	}

	/* Current chunk exhausted, step to the next block/node */
	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		cur->node = block;
		cur->start = amdgpu_vram_mgr_block_start(block);
		cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
		break;
	case TTM_PL_TT:
		node = cur->node;

		cur->node = ++node;
		cur->start = node->start << PAGE_SHIFT;
		cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
		break;
	default:
		return;
	}
}
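
/*
 * A caller does not have to consume a whole chunk per step. Illustrative
 * sketch (MAX_XFER_BYTES and queue_xfer() are hypothetical): cap each
 * advance and the cursor transparently splits chunks across iterations.
 *
 *	while (cursor.remaining) {
 *		uint64_t n = min(cursor.size, (uint64_t)MAX_XFER_BYTES);
 *
 *		queue_xfer(cursor.start, n);
 *		amdgpu_res_next(&cursor, n);
 *	}
 */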

#endif /* __AMDGPU_RES_CURSOR_H__ */