// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"

/**
* xe_bo_evict_all - evict all BOs from VRAM
*
* @xe: xe device
*
* Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
* (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
 * All eviction magic is done via TTM calls.
*
* Evict == move VRAM BOs to temporary (typically system) memory.
*
* This function should be called before the device goes into a suspend state
* where the VRAM loses power.
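 *
 * Illustrative suspend-side usage (a sketch only; the real sequencing and
 * error handling live in the xe PM code):
 *
 *	err = xe_bo_evict_all(xe);
 *	if (err)
 *		return err;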
*/
int xe_bo_evict_all(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
struct xe_bo *bo;
struct xe_tile *tile;
struct list_head still_in_list;
u32 mem_type;
u8 id;
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	/* User memory */
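	/*
	 * Let TTM evict every unpinned BO from both VRAM placement managers;
	 * the moves themselves go through the driver's TTM move callback,
	 * which uses the GPU copy path for VRAM.
	 */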
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);

		if (man) {
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
return ret;
}
	}

	/* Pinned user memory in VRAM */
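	/*
	 * xe_bo_evict_pinned() can sleep, so pinned.lock cannot be held
	 * across it. Take a reference, park the BO on a private list so the
	 * loop does not revisit it, and drop the lock around the eviction;
	 * still_in_list is spliced back onto external_vram when done.
	 */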
INIT_LIST_HEAD(&still_in_list);
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.external_vram,
typeof(*bo), pinned_link);
if (!bo)
break;
xe_bo_get(bo);
list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
ret = xe_bo_evict_pinned(bo);
xe_bo_unlock(bo);
xe_bo_put(bo);
if (ret) {
spin_lock(&xe->pinned.lock);
list_splice_tail(&still_in_list,
&xe->pinned.external_vram);
spin_unlock(&xe->pinned.lock);
return ret;
		}

		spin_lock(&xe->pinned.lock);
}
list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/*
	 * Wait for all user BOs to be evicted, as those evictions depend on
	 * the kernel memory moved below.
	 */
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
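
	/*
	 * With the migrate engine now idle it is safe to move the pinned
	 * kernel BOs (which back that engine) out of VRAM via the CPU.
	 */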
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
typeof(*bo), pinned_link);
if (!bo)
break;
xe_bo_get(bo);
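		/*
		 * Park the BO on pinned.evicted so xe_bo_restore_kernel()
		 * can find and restore it on resume.
		 */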
list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
ret = xe_bo_evict_pinned(bo);
xe_bo_unlock(bo);
xe_bo_put(bo);
if (ret)
			return ret;

		spin_lock(&xe->pinned.lock);
}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

/**
* xe_bo_restore_kernel - restore kernel BOs to VRAM
*
* @xe: xe device
*
 * Move kernel BOs from temporary (typically system) memory back to VRAM via
 * the CPU. All moves are done via TTM calls.
*
* This function should be called early, before trying to init the GT, on device
* resume.
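 *
 * Illustrative resume-side ordering (a sketch; the real sequencing lives in
 * the xe PM code):
 *
 *	err = xe_bo_restore_kernel(xe);	(early, before GT init)
 *	...
 *	err = xe_bo_restore_user(xe);	(late, after GT init)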
*/
int xe_bo_restore_kernel(struct xe_device *xe)
{
struct xe_bo *bo;
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.evicted,
typeof(*bo), pinned_link);
if (!bo)
break;
xe_bo_get(bo);
list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
ret = xe_bo_restore_pinned(bo);
xe_bo_unlock(bo);
if (ret) {
xe_bo_put(bo);
return ret;
}
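
		/*
		 * The BO kept its GGTT node while evicted; re-write the GGTT
		 * PTEs now that the backing store is back in VRAM.
		 */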
if (bo->flags & XE_BO_FLAG_GGTT) {
			struct xe_tile *tile = bo->tile;

			mutex_lock(&tile->mem.ggtt->lock);
xe_ggtt_map_bo(tile->mem.ggtt, bo);
mutex_unlock(&tile->mem.ggtt->lock);
		}

		/*
		 * We expect validate to trigger a move to VRAM and our move
		 * code should set up the iosys map.
		 */
xe_assert(xe, !iosys_map_is_null(&bo->vmap));
		xe_assert(xe, xe_bo_is_vram(bo));
		xe_bo_put(bo);

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}

/**
* xe_bo_restore_user - restore pinned user BOs to VRAM
*
* @xe: xe device
*
 * Move pinned user BOs from temporary (typically system) memory back to VRAM
 * via the CPU. All moves are done via TTM calls.
*
* This function should be called late, after GT init, on device resume.
*/
int xe_bo_restore_user(struct xe_device *xe)
{
struct xe_bo *bo;
struct xe_tile *tile;
struct list_head still_in_list;
u8 id;
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	/* Pinned user memory in VRAM should be validated on resume */
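	/*
	 * Same reference/park/unlock pattern as in xe_bo_evict_all():
	 * xe_bo_restore_pinned() can sleep, so pinned.lock is dropped around
	 * each restore.
	 */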
INIT_LIST_HEAD(&still_in_list);
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.external_vram,
typeof(*bo), pinned_link);
if (!bo)
break;
list_move_tail(&bo->pinned_link, &still_in_list);
xe_bo_get(bo);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
ret = xe_bo_restore_pinned(bo);
xe_bo_unlock(bo);
xe_bo_put(bo);
if (ret) {
spin_lock(&xe->pinned.lock);
list_splice_tail(&still_in_list,
&xe->pinned.external_vram);
spin_unlock(&xe->pinned.lock);
return ret;
		}

		spin_lock(&xe->pinned.lock);
}
list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);

	/* Wait for restore to complete */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	return 0;
}