[mesa.git] src/freedreno/vulkan/tu_cs.c
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_cs.h"
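
/*
 * A rough summary of the tu_cs modes as they are exercised in this file (the
 * authoritative definitions live in tu_cs.h):
 *
 *  - TU_CS_MODE_GROW: backed by a growing array of BOs; finished runs of
 *    command packets are recorded as IB entries for later submission.
 *  - TU_CS_MODE_EXTERNAL: wraps a caller-provided [start, end) buffer and
 *    can never grow.
 *  - TU_CS_MODE_SUB_STREAM: hands out pieces of its BOs, either as external
 *    sub-streams (tu_cs_begin_sub_stream) or as plain allocations
 *    (tu_cs_alloc).
 */
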
/**
 * Initialize a command stream.
 */
void
tu_cs_init(struct tu_cs *cs,
           struct tu_device *device,
           enum tu_cs_mode mode,
           uint32_t initial_size)
{
   assert(mode != TU_CS_MODE_EXTERNAL);

   memset(cs, 0, sizeof(*cs));

   cs->device = device;
   cs->mode = mode;
   cs->next_bo_size = initial_size;
}

/**
 * Initialize a command stream as a wrapper to an external buffer.
 */
void
tu_cs_init_external(struct tu_cs *cs, uint32_t *start, uint32_t *end)
{
   memset(cs, 0, sizeof(*cs));

   cs->mode = TU_CS_MODE_EXTERNAL;
   cs->start = cs->reserved_end = cs->cur = start;
   cs->end = end;
}

/**
 * Finish and release all resources owned by a command stream.
 */
void
tu_cs_finish(struct tu_cs *cs)
{
   for (uint32_t i = 0; i < cs->bo_count; ++i) {
      tu_bo_finish(cs->device, cs->bos[i]);
      free(cs->bos[i]);
   }

   free(cs->entries);
   free(cs->bos);
}

/**
 * Get the offset of the command packets emitted since the last call to
 * tu_cs_add_entry.
 */
static uint32_t
tu_cs_get_offset(const struct tu_cs *cs)
{
   assert(cs->bo_count);
   return cs->start - (uint32_t *) cs->bos[cs->bo_count - 1]->map;
}

/*
 * Allocate and add a BO to a command stream. Following command packets will
 * be emitted to the new BO.
 */
static VkResult
tu_cs_add_bo(struct tu_cs *cs, uint32_t size)
{
   /* no BO for TU_CS_MODE_EXTERNAL */
   assert(cs->mode != TU_CS_MODE_EXTERNAL);

   /* no dangling command packet */
   assert(tu_cs_is_empty(cs));

   /* grow cs->bos if needed */
   if (cs->bo_count == cs->bo_capacity) {
      uint32_t new_capacity = MAX2(4, 2 * cs->bo_capacity);
      struct tu_bo **new_bos =
         realloc(cs->bos, new_capacity * sizeof(struct tu_bo *));
      if (!new_bos)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cs->bo_capacity = new_capacity;
      cs->bos = new_bos;
   }

   struct tu_bo *new_bo = malloc(sizeof(struct tu_bo));
   if (!new_bo)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VkResult result =
      tu_bo_init_new(cs->device, new_bo, size * sizeof(uint32_t));
   if (result != VK_SUCCESS) {
      free(new_bo);
      return result;
   }

   result = tu_bo_map(cs->device, new_bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(cs->device, new_bo);
      free(new_bo);
      return result;
   }

   cs->bos[cs->bo_count++] = new_bo;

   cs->start = cs->cur = cs->reserved_end = (uint32_t *) new_bo->map;
   cs->end = cs->start + new_bo->size / sizeof(uint32_t);

   return VK_SUCCESS;
}
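
/*
 * Note that the size passed to tu_cs_add_bo() is in dwords. For example, a
 * call like tu_cs_add_bo(cs, 2048) would back the stream with a
 * 2048 * sizeof(uint32_t) = 8 KiB buffer and point cs->start, cs->cur and
 * cs->end at its CPU mapping.
 */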

/**
 * Reserve an IB entry.
 */
static VkResult
tu_cs_reserve_entry(struct tu_cs *cs)
{
   /* entries are only for TU_CS_MODE_GROW */
   assert(cs->mode == TU_CS_MODE_GROW);

   /* grow cs->entries if needed */
   if (cs->entry_count == cs->entry_capacity) {
      uint32_t new_capacity = MAX2(4, cs->entry_capacity * 2);
      struct tu_cs_entry *new_entries =
         realloc(cs->entries, new_capacity * sizeof(struct tu_cs_entry));
      if (!new_entries)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cs->entry_capacity = new_capacity;
      cs->entries = new_entries;
   }

   return VK_SUCCESS;
}

/**
 * Add an IB entry for the command packets emitted since the last call to this
 * function.
 */
static void
tu_cs_add_entry(struct tu_cs *cs)
{
   /* entries are only for TU_CS_MODE_GROW */
   assert(cs->mode == TU_CS_MODE_GROW);

   /* disallow empty entry */
   assert(!tu_cs_is_empty(cs));

   /*
    * because we disallow empty entry, tu_cs_add_bo and tu_cs_reserve_entry
    * must both have been called
    */
   assert(cs->bo_count);
   assert(cs->entry_count < cs->entry_capacity);

   /* add an entry for [cs->start, cs->cur] */
   cs->entries[cs->entry_count++] = (struct tu_cs_entry) {
      .bo = cs->bos[cs->bo_count - 1],
      .size = tu_cs_get_size(cs) * sizeof(uint32_t),
      .offset = tu_cs_get_offset(cs) * sizeof(uint32_t),
   };

   cs->start = cs->cur;
}
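
/*
 * Each IB entry is essentially a (bo, byte offset, byte size) triple naming a
 * finished run of packets; several entries may point into the same BO. How
 * the entries are ultimately submitted is outside this file, but they are
 * what a GROW-mode stream hands to its caller instead of one contiguous
 * buffer.
 */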

/**
 * Same behavior as tu_cs_emit_call, but the target's IB entries are appended
 * to \a cs directly instead of being referenced through an indirect call.
 */
VkResult
tu_cs_add_entries(struct tu_cs *cs, struct tu_cs *target)
{
   VkResult result;

   assert(cs->mode == TU_CS_MODE_GROW);
   assert(target->mode == TU_CS_MODE_GROW);

   if (!tu_cs_is_empty(cs))
      tu_cs_add_entry(cs);

   for (unsigned i = 0; i < target->entry_count; i++) {
      result = tu_cs_reserve_entry(cs);
      if (result != VK_SUCCESS)
         return result;
      cs->entries[cs->entry_count++] = target->entries[i];
   }

   return VK_SUCCESS;
}

/**
 * Begin (or continue) command packet emission. This does nothing but sanity
 * checks currently. \a cs must not be in TU_CS_MODE_SUB_STREAM mode.
 */
void
tu_cs_begin(struct tu_cs *cs)
{
   assert(cs->mode != TU_CS_MODE_SUB_STREAM);
   assert(tu_cs_is_empty(cs));
}

/**
 * End command packet emission. This adds an IB entry when \a cs is in
 * TU_CS_MODE_GROW mode.
 */
void
tu_cs_end(struct tu_cs *cs)
{
   assert(cs->mode != TU_CS_MODE_SUB_STREAM);

   if (cs->mode == TU_CS_MODE_GROW && !tu_cs_is_empty(cs))
      tu_cs_add_entry(cs);
}

/**
 * Begin command packet emission to a sub-stream. \a cs must be in
 * TU_CS_MODE_SUB_STREAM mode.
 *
 * Return \a sub_cs which is in TU_CS_MODE_EXTERNAL mode. tu_cs_begin and
 * tu_cs_reserve_space are implied and \a sub_cs is ready for command packet
 * emission.
 */
VkResult
tu_cs_begin_sub_stream(struct tu_cs *cs, uint32_t size, struct tu_cs *sub_cs)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(size);

   VkResult result = tu_cs_reserve_space(cs, size);
   if (result != VK_SUCCESS)
      return result;

   tu_cs_init_external(sub_cs, cs->cur, cs->reserved_end);
   tu_cs_begin(sub_cs);
   result = tu_cs_reserve_space(sub_cs, size);
   assert(result == VK_SUCCESS);

   return VK_SUCCESS;
}
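
/*
 * For illustration only, a typical sub-stream use would look roughly like the
 * sketch below; cmd->sub_cs, REG_A6XX_SOME_REG and value are placeholder
 * names, not taken from a real caller, and the emit helpers are the usual
 * ones from tu_cs.h:
 *
 *    struct tu_cs sub_cs;
 *    VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 4, &sub_cs);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    tu_cs_emit_pkt4(&sub_cs, REG_A6XX_SOME_REG, 1);
 *    tu_cs_emit(&sub_cs, value);
 *    struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
 *
 * The returned entry can then be recorded for later use while cmd->sub_cs
 * keeps handing out further sub-streams from the same BO.
 */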

/**
 * Allocate count*size dwords, aligned to size dwords.
 * \a cs must be in TU_CS_MODE_SUB_STREAM mode.
 */
VkResult
tu_cs_alloc(struct tu_cs *cs,
            uint32_t count,
            uint32_t size,
            struct tu_cs_memory *memory)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(size && size <= 1024);

   if (!count)
      return VK_SUCCESS;

   /* TODO: smarter way to deal with alignment? */

   VkResult result = tu_cs_reserve_space(cs, count * size + (size-1));
   if (result != VK_SUCCESS)
      return result;

   struct tu_bo *bo = cs->bos[cs->bo_count - 1];
   size_t offset = align(tu_cs_get_offset(cs), size);

   memory->map = bo->map + offset * sizeof(uint32_t);
   memory->iova = bo->iova + offset * sizeof(uint32_t);

   cs->start = cs->cur = (uint32_t*) bo->map + offset + count * size;

   return VK_SUCCESS;
}
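
/*
 * A quick sanity check of the alignment math above: with count = 2 and
 * size = 16, tu_cs_reserve_space() is asked for 2 * 16 + 15 = 47 dwords, and
 * align() rounds the current offset up to the next multiple of 16. Even in
 * the worst case, where the current offset is 15 dwords past a 16-dword
 * boundary, the aligned block of 32 dwords still fits inside the 47 reserved
 * dwords.
 */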

/**
 * End command packet emission to a sub-stream. \a sub_cs becomes invalid
 * after this call.
 *
 * Return an IB entry for the sub-stream. The entry has the same lifetime as
 * \a cs.
 */
struct tu_cs_entry
tu_cs_end_sub_stream(struct tu_cs *cs, struct tu_cs *sub_cs)
{
   assert(cs->mode == TU_CS_MODE_SUB_STREAM);
   assert(cs->bo_count);
   assert(sub_cs->start == cs->cur && sub_cs->end == cs->reserved_end);
   tu_cs_sanity_check(sub_cs);

   tu_cs_end(sub_cs);

   cs->cur = sub_cs->cur;

   struct tu_cs_entry entry = {
      .bo = cs->bos[cs->bo_count - 1],
      .size = tu_cs_get_size(cs) * sizeof(uint32_t),
      .offset = tu_cs_get_offset(cs) * sizeof(uint32_t),
   };

   cs->start = cs->cur;

   return entry;
}

/**
 * Reserve space from a command stream for \a reserved_size uint32_t values.
 * This never fails when \a cs has mode TU_CS_MODE_EXTERNAL.
 */
VkResult
tu_cs_reserve_space(struct tu_cs *cs, uint32_t reserved_size)
{
   if (tu_cs_get_space(cs) < reserved_size) {
      if (cs->mode == TU_CS_MODE_EXTERNAL) {
         unreachable("cannot grow external buffer");
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      /* add an entry for the existing command packets */
      if (!tu_cs_is_empty(cs)) {
         /* no direct command packet for TU_CS_MODE_SUB_STREAM */
         assert(cs->mode != TU_CS_MODE_SUB_STREAM);

         tu_cs_add_entry(cs);
      }

      if (cs->cond_flags) {
         /* Subtract one here to account for the DWORD field itself. */
         *cs->cond_dwords = cs->cur - cs->cond_dwords - 1;

         /* space for CP_COND_REG_EXEC in next bo */
         reserved_size += 3;
      }

      /* switch to a new BO */
      uint32_t new_size = MAX2(cs->next_bo_size, reserved_size);
      VkResult result = tu_cs_add_bo(cs, new_size);
      if (result != VK_SUCCESS)
         return result;

      /* if inside a condition, emit a new CP_COND_REG_EXEC */
      if (cs->cond_flags) {
         cs->reserved_end = cs->cur + reserved_size;

         tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
         tu_cs_emit(cs, cs->cond_flags);

         cs->cond_dwords = cs->cur;

         /* Emit dummy DWORD field here */
         tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(0));
      }

      /* double the size for the next bo */
      new_size <<= 1;
      if (cs->next_bo_size < new_size)
         cs->next_bo_size = new_size;
   }

   assert(tu_cs_get_space(cs) >= reserved_size);
   cs->reserved_end = cs->cur + reserved_size;

   if (cs->mode == TU_CS_MODE_GROW) {
      /* reserve an entry for the next call to this function or tu_cs_end */
      return tu_cs_reserve_entry(cs);
   }

   return VK_SUCCESS;
}
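
/*
 * Two details of the grow path above are worth calling out. First,
 * next_bo_size doubles after every switch, so a long-running stream ends up
 * with a small number of progressively larger BOs rather than many small
 * ones. Second, if the stream runs out of space inside a CP_COND_REG_EXEC
 * region, the packet's DWORDS field is patched to close the region at the end
 * of the old BO and a fresh CP_COND_REG_EXEC with the same flags is emitted
 * at the top of the new BO, so the condition logically continues across the
 * BO boundary.
 */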

/**
 * Reset a command stream to its initial state. This discards all command
 * packets in \a cs, but does not necessarily release all resources.
 */
void
tu_cs_reset(struct tu_cs *cs)
{
   if (cs->mode == TU_CS_MODE_EXTERNAL) {
      assert(!cs->bo_count && !cs->entry_count);
      cs->reserved_end = cs->cur = cs->start;
      return;
   }

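   /*
    * Free every BO except the most recently added one; that BO is kept (and
    * becomes bos[0]) so the stream can be reused without immediately
    * reallocating.
    */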
   for (uint32_t i = 0; i + 1 < cs->bo_count; ++i) {
      tu_bo_finish(cs->device, cs->bos[i]);
      free(cs->bos[i]);
   }

   if (cs->bo_count) {
      cs->bos[0] = cs->bos[cs->bo_count - 1];
      cs->bo_count = 1;

      cs->start = cs->cur = cs->reserved_end = (uint32_t *) cs->bos[0]->map;
      cs->end = cs->start + cs->bos[0]->size / sizeof(uint32_t);
   }

   cs->entry_count = 0;
}