turnip: never fail tu_cs_begin/tu_cs_end
src/freedreno/vulkan/tu_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"
#include "registers/a6xx.xml.h"

#include "vk_format.h"

#include "tu_cs.h"

void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}

uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
                                       .flags = flags,
                                       .handle = bo->gem_handle,
                                       .presumed = bo->iova,
                                    });
}

VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}

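/* Lay out the per-attachment GMEM buffers sequentially, each aligned to
 * 16KB, and fail when the total (tile pixels times bytes-per-pixel, summed
 * over all buffers) does not fit in the device's GMEM.
 */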
static VkResult
tu_tiling_config_update_gmem_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t gmem_size = dev->physical_device->gmem_size;
   uint32_t offset = 0;

   for (uint32_t i = 0; i < tiling->buffer_count; i++) {
      /* 16KB-aligned */
      offset = align(offset, 0x4000);

      tiling->gmem_offsets[i] = offset;
      offset += tiling->tile0.extent.width * tiling->tile0.extent.height *
                tiling->buffer_cpp[i];
   }

   return offset <= gmem_size ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
}

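/* Choose the tile grid: start with a single tile covering the aligned
 * render area, split horizontally until the hardware's maximum tile width
 * is respected, then keep splitting along the longer axis until every
 * attachment fits in GMEM.
 */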
static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   tiling->tile0.offset = (VkOffset2D) {
      .x = tiling->render_area.offset.x & ~(tile_align_w - 1),
      .y = tiling->render_area.offset.y & ~(tile_align_h - 1),
   };

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(ra_width / tiling->tile_count.width, tile_align_w);
   }

   /* do not exceed gmem size */
   while (tu_tiling_config_update_gmem_layout(tiling, dev) != VK_SUCCESS) {
      if (tiling->tile0.extent.width > tiling->tile0.extent.height) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(ra_width / tiling->tile_count.width, tile_align_w);
      } else {
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(ra_height / tiling->tile_count.height, tile_align_h);
      }
   }
}

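/* Group tiles into visibility-stream pipes: start with one tile per pipe
 * and grow the per-pipe footprint until no more than 32 pipes are in use,
 * first capping the vertical pipe count, then the total.
 */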
static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   /* do not exceed max pipe count vertically */
   while (tiling->pipe_count.height > max_pipe_count) {
      tiling->pipe0.height += 2;
      tiling->pipe_count.height =
         (tiling->tile_count.height + tiling->pipe0.height - 1) /
         tiling->pipe0.height;
   }

   /* do not exceed max pipe count */
   while (tiling->pipe_count.width * tiling->pipe_count.height >
          max_pipe_count) {
      tiling->pipe0.width += 1;
      tiling->pipe_count.width =
         (tiling->tile_count.width + tiling->pipe0.width - 1) /
         tiling->pipe0.width;
   }
}

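/* Fill in each pipe's VSC_PIPE_CONFIG register value (its rectangle in
 * tile units) and the matching CP_SET_BIN_DATA5 size; pipes on the last
 * row/column may cover fewer tiles, and unused entries are zeroed.
 */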
static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   /* wrap into the 1..pipe0 range so a tile count that divides evenly does
    * not yield a zero-sized last pipe
    */
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}

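/* Recompute the full tiling configuration, but only when the render area
 * or the attachment formats/sample counts actually changed.
 */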
static void
tu_tiling_config_update(struct tu_tiling_config *tiling,
                        const struct tu_device *dev,
                        const uint32_t *buffer_cpp,
                        uint32_t buffer_count,
                        const VkRect2D *render_area)
{
   /* see if there is any real change */
   const bool ra_changed =
      render_area &&
      memcmp(&tiling->render_area, render_area, sizeof(*render_area));
   const bool buf_changed = tiling->buffer_count != buffer_count ||
                            memcmp(tiling->buffer_cpp, buffer_cpp,
                                   sizeof(*buffer_cpp) * buffer_count);
   if (!ra_changed && !buf_changed)
      return;

   if (ra_changed)
      tiling->render_area = *render_area;

   if (buf_changed) {
      memcpy(tiling->buffer_cpp, buffer_cpp,
             sizeof(*buffer_cpp) * buffer_count);
      tiling->buffer_count = buffer_count;
   }

   tu_tiling_config_update_tile_layout(tiling, dev);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}

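/* Map tile (tx, ty) to its visibility pipe and its slot within that pipe,
 * and compute the screen-space rectangle it covers; tiles on the last
 * row/column are clamped to the render area.
 */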
static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = tiling->pipe0.width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}

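/* Write an incrementing sequence number to the command buffer's scratch
 * register, presumably so the command stream's progress can be observed
 * when debugging hangs.
 */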
static void
tu6_emit_marker(struct tu_cmd_buffer *cmd)
{
   tu_cs_emit_write_reg(cmd->cur_cs, cmd->marker_reg, ++cmd->marker_seqno);
}

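/* Emit a CP_EVENT_WRITE packet; when a sequence number is requested, the
 * GPU also writes an incrementing value to the scratch BO so completion
 * of the event can be tracked.
 */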
static void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   struct tu_cs *cs = cmd->cur_cs;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      tu_cs_emit(cs, ++cmd->scratch_seqno);
   }
}

static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd)
{
   tu6_emit_event_write(cmd, 0x31, false);
}

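/* Emit the static "golden" register state a freshly begun primary command
 * buffer needs. Most of these values appear to be carried over from the
 * freedreno GL driver and are not yet understood (hence the UNKNOWN
 * register names).
 */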
static void
tu6_init_hw(struct tu_cmd_buffer *cmd)
{
   struct tu_cs *cs = cmd->cur_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_cache_flush(cmd);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x7c400004);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A009, 0x00000001);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8101, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8109, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8810, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B06, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B06, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu6_emit_marker(cmd);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                  CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                  CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(0), 3);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_HI_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_SIZE_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE_LO(0), 2);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_HI_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUF_CNTL, 1);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUF_CNTL */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(0), 1);
   tu_cs_emit(cs, 0x00000000); /* UNKNOWN_E2AB */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(1), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(1), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(2), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(3), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_reserve_space_assert(cs);
}

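/* Walk every tile in the configured grid and emit its rendering commands;
 * the actual per-tile emission is still TODO at this point.
 */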
static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         /* TODO */
      }
   }
}

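/* Gather the bytes-per-pixel of every attachment used by the current
 * subpass (color plus depth/stencil) and feed them, together with the
 * render area, into the tiling configuration.
 */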
static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = cmd->state.subpass;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t buffer_cpp[MAX_RTS + 2];
   uint32_t buffer_count = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      const uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_render_pass_attachment *att = &pass->attachments[a];
      buffer_cpp[buffer_count++] =
         vk_format_get_blocksize(att->format) * att->samples;
   }

   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      const struct tu_render_pass_attachment *att = &pass->attachments[a];

      /* TODO */
      assert(att->format != VK_FORMAT_D32_SFLOAT_S8_UINT);

      buffer_cpp[buffer_count++] =
         vk_format_get_blocksize(att->format) * att->samples;
   }

   tu_tiling_config_update(tiling, dev, buffer_cpp, buffer_count,
                           render_area);
}

const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};

static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}

static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;

   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, 4096);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   cmd_buffer->marker_reg = REG_A6XX_CP_SCRATCH_REG(
      cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ? 7 : 6);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      return result;

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

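/* Allocate per-attachment clear state for the render pass instance and
 * record which aspects each attachment needs cleared based on its load
 * ops.
 */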
static VkResult
tu_cmd_state_setup_attachments(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_render_pass *pass,
                               const VkRenderPassBeginInfo *info)
{
   struct tu_cmd_state *state = &cmd_buffer->state;

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return VK_SUCCESS;
   }

   state->attachments =
      vk_alloc(&cmd_buffer->pool->alloc,
               pass->attachment_count * sizeof(state->attachments[0]), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
      return cmd_buffer->record_result;
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      const struct tu_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;

      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      state->attachments[i].cleared_views = 0;
      if (clear_aspects && info) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }

      state->attachments[i].current_layout = att->initial_layout;
   }

   return VK_SUCCESS;
}

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   tu_cs_begin(&cmd_buffer->cs);

   cmd_buffer->marker_seqno = 0;
   cmd_buffer->scratch_seqno = 0;

   cmd_buffer->cur_cs = &cmd_buffer->cs;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer);
         break;
      default:
         break;
      }
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}

void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   tu_cs_end(&cmd_buffer->cs);

   assert(!cmd_buffer->state.attachments);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
   VkResult result;

   cmd_buffer->state.pass = pass;
   cmd_buffer->state.subpass = pass->subpasses;
   cmd_buffer->state.framebuffer = framebuffer;

   result =
      tu_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
   if (result != VK_SUCCESS)
      return;

   tu_cmd_update_tiling_config(cmd_buffer, &pRenderPassBegin->renderArea);
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   tu_cmd_render_tiles(cmd);

   cmd->state.subpass++;
   tu_cmd_update_tiling_config(cmd, NULL);
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cmd_render_tiles(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   cmd_buffer->state.attachments = NULL;

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}