turnip: specify initial size in tu_cs_init
src/freedreno/vulkan/tu_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"
#include "registers/a6xx.xml.h"

#include "vk_format.h"

#include "tu_cs.h"

void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
static uint32_t
tu_bo_list_add_info(struct tu_bo_list *list,
                    const struct drm_msm_gem_submit_bo *bo_info)
{
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == bo_info->handle) {
         assert(list->bo_infos[i].presumed == bo_info->presumed);
         list->bo_infos[i].flags |= bo_info->flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   list->bo_infos[list->count] = *bo_info;
   return list->count++;
}
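
/* Example: adding one BO twice merges into a single entry. A BO first added
 * with MSM_SUBMIT_BO_READ and later with MSM_SUBMIT_BO_WRITE keeps one entry
 * whose flags are READ | WRITE, and both calls return the same index.
 */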

uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
                                       .flags = flags,
                                       .handle = bo->gem_handle,
                                       .presumed = bo->iova,
                                    });
}

VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
{
   for (uint32_t i = 0; i < other->count; i++) {
      if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}

static VkResult
tu_tiling_config_update_gmem_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t gmem_size = dev->physical_device->gmem_size;
   uint32_t offset = 0;

   for (uint32_t i = 0; i < tiling->buffer_count; i++) {
      /* 16KB-aligned */
      offset = align(offset, 0x4000);

      tiling->gmem_offsets[i] = offset;
      offset += tiling->tile0.extent.width * tiling->tile0.extent.height *
                tiling->buffer_cpp[i];
   }

   return offset <= gmem_size ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
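
/* A worked example with hypothetical numbers: for a 256x128 tile and two
 * buffers at 4 and 2 bytes per pixel, buffer 0 sits at GMEM offset 0 and
 * takes 256 * 128 * 4 = 131072 bytes; buffer 1 starts at
 * align(131072, 0x4000) = 131072 and takes 65536 more. The 196608-byte
 * total must fit in gmem_size, or the caller shrinks the tile and retries.
 */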

static void
tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
   const uint32_t max_tile_width = 1024; /* A6xx */

   tiling->tile0.offset = (VkOffset2D) {
      .x = tiling->render_area.offset.x & ~(tile_align_w - 1),
      .y = tiling->render_area.offset.y & ~(tile_align_h - 1),
   };

   const uint32_t ra_width =
      tiling->render_area.extent.width +
      (tiling->render_area.offset.x - tiling->tile0.offset.x);
   const uint32_t ra_height =
      tiling->render_area.extent.height +
      (tiling->render_area.offset.y - tiling->tile0.offset.y);

   /* start from 1 tile */
   tiling->tile_count = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->tile0.extent = (VkExtent2D) {
      .width = align(ra_width, tile_align_w),
      .height = align(ra_height, tile_align_h),
   };

   /* do not exceed max tile width */
   while (tiling->tile0.extent.width > max_tile_width) {
      tiling->tile_count.width++;
      tiling->tile0.extent.width =
         align(ra_width / tiling->tile_count.width, tile_align_w);
   }

   /* do not exceed gmem size */
   while (tu_tiling_config_update_gmem_layout(tiling, dev) != VK_SUCCESS) {
      if (tiling->tile0.extent.width > tiling->tile0.extent.height) {
         tiling->tile_count.width++;
         tiling->tile0.extent.width =
            align(ra_width / tiling->tile_count.width, tile_align_w);
      } else {
         tiling->tile_count.height++;
         tiling->tile0.extent.height =
            align(ra_height / tiling->tile_count.height, tile_align_h);
      }
   }
}
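
/* A worked example with hypothetical numbers: for a 1920x1080 render area
 * at offset (0, 0) with 32x32 tile alignment, the initial single tile is
 * 1920x1088. Since 1920 exceeds max_tile_width, the width is split until
 * align(1920 / 2, 32) = 960 fits, giving 2x1 tiles of 960x1088; the tile
 * count then grows further only if that footprint overflows GMEM.
 */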

static void
tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
                                    const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */

   /* start from 1 tile per pipe */
   tiling->pipe0 = (VkExtent2D) {
      .width = 1,
      .height = 1,
   };
   tiling->pipe_count = tiling->tile_count;

   /* do not exceed max pipe count vertically */
   while (tiling->pipe_count.height > max_pipe_count) {
      tiling->pipe0.height += 2;
      tiling->pipe_count.height =
         (tiling->tile_count.height + tiling->pipe0.height - 1) /
         tiling->pipe0.height;
   }

   /* do not exceed max pipe count */
   while (tiling->pipe_count.width * tiling->pipe_count.height >
          max_pipe_count) {
      tiling->pipe0.width += 1;
      tiling->pipe_count.width =
         (tiling->tile_count.width + tiling->pipe0.width - 1) /
         tiling->pipe0.width;
   }
}
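
/* A worked example with hypothetical numbers: with 8x6 tiles, the vertical
 * loop is a no-op (6 <= 32), but 8 * 6 = 48 pipes exceeds max_pipe_count,
 * so pipe0.width grows to 2 and pipe_count.width becomes (8 + 1) / 2 = 4,
 * leaving 4 * 6 = 24 pipes of 2x1 tiles each.
 */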

static void
tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
                              const struct tu_device *dev)
{
   const uint32_t max_pipe_count = 32; /* A6xx */
   const uint32_t used_pipe_count =
      tiling->pipe_count.width * tiling->pipe_count.height;
   /* the last pipe in each direction covers the remaining tiles, which may
    * be fewer than pipe0 but never zero
    */
   const VkExtent2D last_pipe = {
      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
   };

   assert(used_pipe_count <= max_pipe_count);
   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));

   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
         const uint32_t pipe_x = tiling->pipe0.width * x;
         const uint32_t pipe_y = tiling->pipe0.height * y;
         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
                                    ? last_pipe.width
                                    : tiling->pipe0.width;
         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
                                    ? last_pipe.height
                                    : tiling->pipe0.height;
         const uint32_t n = tiling->pipe_count.width * y + x;

         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
      }
   }

   memset(tiling->pipe_config + used_pipe_count, 0,
          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
}
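
/* Example, continuing with 2x1-tile pipes over 8x6 tiles (4x6 pipes): the
 * pipe at (x=1, y=2) covers tiles starting at (2, 2), so its VSC pipe
 * config packs X=2, Y=2, W=2, H=1, and its VSC size is 2 * 1 = 2.
 */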

static void
tu_tiling_config_update(struct tu_tiling_config *tiling,
                        const struct tu_device *dev,
                        const uint32_t *buffer_cpp,
                        uint32_t buffer_count,
                        const VkRect2D *render_area)
{
   /* see if there is any real change */
   const bool ra_changed =
      render_area &&
      memcmp(&tiling->render_area, render_area, sizeof(*render_area));
   const bool buf_changed = tiling->buffer_count != buffer_count ||
                            memcmp(tiling->buffer_cpp, buffer_cpp,
                                   sizeof(*buffer_cpp) * buffer_count);
   if (!ra_changed && !buf_changed)
      return;

   if (ra_changed)
      tiling->render_area = *render_area;

   if (buf_changed) {
      memcpy(tiling->buffer_cpp, buffer_cpp,
             sizeof(*buffer_cpp) * buffer_count);
      tiling->buffer_count = buffer_count;
   }

   tu_tiling_config_update_tile_layout(tiling, dev);
   tu_tiling_config_update_pipe_layout(tiling, dev);
   tu_tiling_config_update_pipes(tiling, dev);
}

static void
tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
                          const struct tu_device *dev,
                          uint32_t tx,
                          uint32_t ty,
                          struct tu_tile *tile)
{
   /* find the pipe and the slot for tile (tx, ty) */
   const uint32_t px = tx / tiling->pipe0.width;
   const uint32_t py = ty / tiling->pipe0.height;
   const uint32_t sx = tx - tiling->pipe0.width * px;
   const uint32_t sy = ty - tiling->pipe0.height * py;

   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);

   /* convert to 1D indices */
   tile->pipe = tiling->pipe_count.width * py + px;
   tile->slot = tiling->pipe0.width * sy + sx;

   /* get the blit area for the tile */
   tile->begin = (VkOffset2D) {
      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
   };
   tile->end.x =
      (tx == tiling->tile_count.width - 1)
         ? tiling->render_area.offset.x + tiling->render_area.extent.width
         : tile->begin.x + tiling->tile0.extent.width;
   tile->end.y =
      (ty == tiling->tile_count.height - 1)
         ? tiling->render_area.offset.y + tiling->render_area.extent.height
         : tile->begin.y + tiling->tile0.extent.height;
}
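
/* Example: with 2x2-tile pipes, tile (tx=3, ty=1) maps to pipe column
 * px = 3 / 2 = 1 and row py = 0, at in-pipe coordinates (sx=1, sy=1); its
 * slot index is 2 * 1 + 1 = 3 and its pipe index is
 * pipe_count.width * 0 + 1 = 1.
 */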

static void
tu6_emit_marker(struct tu_cmd_buffer *cmd)
{
   tu_cs_emit_write_reg(cmd->cur_cs, cmd->marker_reg, ++cmd->marker_seqno);
}

static void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     enum vgt_event_type event,
                     bool need_seqno)
{
   struct tu_cs *cs = cmd->cur_cs;

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
   if (need_seqno) {
      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
      tu_cs_emit(cs, ++cmd->scratch_seqno);
   }
}
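
/* The CP_EVENT_WRITE packet above is a single dword (just the event) when no
 * fence is needed, or four dwords: the event, the low and high halves of the
 * scratch BO address, and a fresh sequence number for the GPU to write back
 * to that address once the event completes.
 */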

static void
tu6_emit_cache_flush(struct tu_cmd_buffer *cmd)
{
   tu6_emit_event_write(cmd, 0x31, false);
}

static void
tu6_init_hw(struct tu_cmd_buffer *cmd)
{
   struct tu_cs *cs = cmd->cur_cs;

   VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   tu6_emit_cache_flush(cmd);

   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x7c400004);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
   tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A009, 0x00000001);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_SRGB_CNTL, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8101, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8109, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL0, 0x401);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_RENDER_CONTROL1, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8810, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B06, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B06, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

   tu6_emit_marker(cmd);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);

   tu_cs_emit_write_reg(cs, REG_A6XX_VFD_UNKNOWN_A008, 0);

   tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);

   /* we don't use this yet.. probably best to disable.. */
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
                     CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                     CP_SET_DRAW_STATE__0_GROUP_ID(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(0), 3);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_BASE_HI_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUFFER_SIZE_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE_LO(0), 2);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_LO_0 */
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_FLUSH_BASE_HI_0 */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUF_CNTL, 1);
   tu_cs_emit(cs, 0x00000000); /* VPC_SO_BUF_CNTL */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(0), 1);
   tu_cs_emit(cs, 0x00000000); /* UNKNOWN_E2AB */

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE_LO(1), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(1), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(2), 6);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_OFFSET(3), 3);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CTRL_REG0, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_LRZ_CNTL, 1);
   tu_cs_emit(cs, 0x00000000);

   tu_cs_reserve_space_assert(cs);
}

static void
tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
{
   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
         struct tu_tile tile;
         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
         /* TODO */
      }
   }
}

static void
tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
                            const VkRect2D *render_area)
{
   const struct tu_device *dev = cmd->device;
   const struct tu_render_pass *pass = cmd->state.pass;
   const struct tu_subpass *subpass = cmd->state.subpass;
   struct tu_tiling_config *tiling = &cmd->state.tiling_config;

   uint32_t buffer_cpp[MAX_RTS + 2];
   uint32_t buffer_count = 0;

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      const uint32_t a = subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      const struct tu_render_pass_attachment *att = &pass->attachments[a];
      buffer_cpp[buffer_count++] =
         vk_format_get_blocksize(att->format) * att->samples;
   }

   if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      const uint32_t a = subpass->depth_stencil_attachment.attachment;
      const struct tu_render_pass_attachment *att = &pass->attachments[a];

      /* TODO */
      assert(att->format != VK_FORMAT_D32_SFLOAT_S8_UINT);

      buffer_cpp[buffer_count++] =
         vk_format_get_blocksize(att->format) * att->samples;
   }

   tu_tiling_config_update(tiling, dev, buffer_cpp, buffer_count,
                           render_area);
}

const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};

static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}

static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;

   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs, 4096);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   cmd_buffer->marker_reg = REG_A6XX_CP_SCRATCH_REG(
      cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ? 7 : 6);

   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
   if (result != VK_SUCCESS)
      return result;

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);

   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

static VkResult
tu_cmd_state_setup_attachments(struct tu_cmd_buffer *cmd_buffer,
                               struct tu_render_pass *pass,
                               const VkRenderPassBeginInfo *info)
{
   struct tu_cmd_state *state = &cmd_buffer->state;

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return VK_SUCCESS;
   }

   state->attachments =
      vk_alloc(&cmd_buffer->pool->alloc,
               pass->attachment_count * sizeof(state->attachments[0]), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
      return cmd_buffer->record_result;
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      const struct tu_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;

      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      state->attachments[i].cleared_views = 0;
      if (clear_aspects && info) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }

      state->attachments[i].current_layout = att->initial_layout;
   }

   return VK_SUCCESS;
}
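
/* Example: for a depth/stencil attachment with load_op = CLEAR but
 * stencil_load_op = DONT_CARE, both the depth and stencil aspects end up in
 * pending_clear_aspects; since DONT_CARE stencil contents are undefined
 * anyway, clearing them alongside depth is harmless.
 */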

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   result = tu_cs_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->marker_seqno = 0;
   cmd_buffer->scratch_seqno = 0;

   cmd_buffer->cur_cs = &cmd_buffer->cs;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         tu6_init_hw(cmd_buffer);
         break;
      default:
         break;
      }
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return VK_SUCCESS;
}

void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->scratch_seqno) {
      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
                     MSM_SUBMIT_BO_WRITE);
   }

   VkResult result = tu_cs_end(&cmd_buffer->cs);
   if (result != VK_SUCCESS)
      cmd_buffer->record_result = result;

   assert(!cmd_buffer->state.attachments);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
   TU_FROM_HANDLE(tu_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
   VkResult result;

   cmd_buffer->state.pass = pass;
   cmd_buffer->state.subpass = pass->subpasses;
   cmd_buffer->state.framebuffer = framebuffer;

   result =
      tu_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
   if (result != VK_SUCCESS)
      return;

   tu_cmd_update_tiling_config(cmd_buffer, &pRenderPassBegin->renderArea);
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);

   tu_cmd_render_tiles(cmd);

   cmd->state.subpass++;
   tu_cmd_update_tiling_config(cmd, NULL);
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cmd_render_tiles(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   cmd_buffer->state.attachments = NULL;

   cmd_buffer->state.pass = NULL;
   cmd_buffer->state.subpass = NULL;
   cmd_buffer->state.framebuffer = NULL;
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}