* that means the packets we're emitting also happen during binning. So
* we need to guard the write on !BINNING at CP execution time.
*/
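+ /* CP_COND_REG_EXEC skips CP_COND_REG_EXEC_1_DWORDS(n) dwords when the
+  * condition fails, so the packet and the region it guards must stay
+  * contiguous. Reserve 3 dwords (pkt7 header + 2 payload) plus the body up
+  * front; the per-packet reservation done by tu_cs_emit_pkt7() could
+  * otherwise chain to a new bo in the middle of the guarded region.
+  */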
+ tu_cs_reserve(cs, 3 + 4);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
A6XX_CP_REG_TEST_0_BIT(0) |
A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
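+ /* PRED_TEST executes the guarded dwords only if the predicate written by
+  * the CP_REG_TEST above passed; again reserve the full region so it cannot
+  * be split across bos.
+  */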
+ tu_cs_reserve(cs, 3 + 11);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- VkResult result = tu_cs_reserve_space(cs, 256);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
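+ /* tu_cs_emit_pkt4()/tu_cs_emit_pkt7() reserve space for each packet they
+  * emit, so no up-front reservation (or VkResult plumbing) is needed here;
+  * growth failures are assumed to be recorded inside the CS rather than
+  * returned per call.
+  */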
tu6_emit_cache_flush(cmd, cs);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
A6XX_CP_REG_TEST_0_BIT(0) |
A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
+ tu_cs_reserve(cs, 3 + 7);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
/* TODO: We shouldn't need this flush, but without it we'd have an empty IB
* when nothing clears, which we currently can't handle.
*/
- tu_cs_reserve_space(&sub_cs, 5);
tu6_emit_event_write(cmd, &sub_cs, PC_CCU_FLUSH_COLOR_TS, true);
cmd->state.sysmem_clear_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
const struct VkRect2D *renderArea)
{
- VkResult result = tu_cs_reserve_space(cs, 1024);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
const struct tu_framebuffer *fb = cmd->state.framebuffer;
if (fb->width > 0 && fb->height > 0) {
tu6_emit_window_scissor(cmd, cs,
}
}
- const uint32_t space = 14 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
- VkResult result = tu_cs_reserve_space(cs, space);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- VkResult result = tu_cs_reserve_space(cs, 1024);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu6_emit_lrz_flush(cmd, cs);
/* lrz clear? */
struct tu_cs *cs,
const struct tu_tile *tile)
{
- const uint32_t render_tile_space = 256 + tu_cs_get_call_size(&cmd->draw_cs);
- VkResult result = tu_cs_reserve_space(cs, render_tile_space);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu6_emit_tile_select(cmd, cs, tile);
tu_cs_emit_ib(cs, &cmd->state.tile_load_ib);
A6XX_CP_REG_TEST_0_BIT(0) |
A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
+ tu_cs_reserve(cs, 3 + 2);
tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
- tu_cs_emit(cs, 0x10000000);
- tu_cs_emit(cs, 2); /* conditionally execute next 2 dwords */
+ tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
+ tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
/* if (no overflow) */ {
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
static void
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- const uint32_t space = 16 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
- VkResult result = tu_cs_reserve_space(cs, space);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
tu_cs_emit_regs(cs,
tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
- const uint32_t space = tu_cs_get_call_size(&cmd->draw_cs);
- VkResult result = tu_cs_reserve_space(&cmd->cs, space);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
cmd->wait_for_idle = true;
/* initialize/update the restart index */
if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(draw_cs, 2);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
tu6_emit_restart_index(
draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(draw_cs, 12);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
assert(firstViewport == 0 && viewportCount == 1);
tu6_emit_viewport(draw_cs, pViewports);
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(draw_cs, 3);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
assert(firstScissor == 0 && scissorCount == 1);
tu6_emit_scissor(draw_cs, pScissors);
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(draw_cs, 4);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
depthBiasSlopeFactor);
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(draw_cs, 5);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu6_emit_blend_constants(draw_cs, blendConstants);
tu_cs_sanity_check(draw_cs);
tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
tu_cmd_prepare_tile_store_ib(cmd);
- VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
}
}
- VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
/* invalidate because reading input attachments will cache GMEM and
* the cache isn't updated when GMEM is written
* TODO: is there a no-cache bit for textures?
const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
uint32_t draw_state_group_count = 0;
+ VkResult result;
struct tu_descriptor_state *descriptors_state =
&cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
- VkResult result = tu_cs_reserve_space(cs, 256);
- if (result != VK_SUCCESS)
- return result;
-
/* TODO lrz */
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
return;
}
- result = tu_cs_reserve_space(cs, 32);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
if (draw->indirect) {
tu_finishme("indirect draw");
return;
struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
struct tu_descriptor_state *descriptors_state =
&cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
-
- VkResult result = tu_cs_reserve_space(cs, 256);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
+ VkResult result;
if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
tu_cs_emit_ib(cs, &pipeline->program.state_ib);
{
struct tu_cs *cs = &cmd->cs;
- VkResult result = tu_cs_reserve_space(cs, 4);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
/* TODO: any flush required before/after ? */
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->cs;
- VkResult result = tu_cs_reserve_space(cs, eventCount * 7);
- if (result != VK_SUCCESS) {
- cmd->record_result = result;
- return;
- }
-
/* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
for (uint32_t i = 0; i < eventCount; i++) {
sizeof(uint64_t) : sizeof(uint32_t);
uint64_t write_iova = base_write_iova + (offset * element_size);
- tu_cs_reserve_space(cs, 6);
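+ /* CP_MEM_TO_MEM copies one query value from the slot to the destination
+  * buffer; the DOUBLE bit widens the copy from 32-bit to 64-bit elements. */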
tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
CP_MEM_TO_MEM_0_DOUBLE : 0;
* To ensure that previous writes to the available bit are coherent, first
* wait for all writes to complete.
*/
- tu_cs_reserve_space(cs, 1);
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
for (uint32_t i = 0; i < queryCount; i++) {
/* Wait for the available bit to be set if executed with the
* VK_QUERY_RESULT_WAIT_BIT flag. */
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- tu_cs_reserve_space(cs, 7);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
CP_WAIT_REG_MEM_0_POLL_MEMORY);
* tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
* that 0 < available < 2, aka available == 1.
*/
- tu_cs_reserve_space(cs, 7 + 6);
+ tu_cs_reserve(cs, 7 + 6);
tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, available_iova);
uint32_t query = firstQuery + i;
uint64_t available_iova = occlusion_query_iova(pool, query, available);
uint64_t result_iova = occlusion_query_iova(pool, query, result);
- tu_cs_reserve_space(cs, 11);
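+ /* Clear the available bit (and, below, the result) with CP_MEM_WRITE so
+  * the reset is performed from the command stream, ordered with prior GPU
+  * work. */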
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, 0x0);
uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
- tu_cs_reserve_space(cs, 7);
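+ /* .copy = true asks the next ZPASS_DONE event to snapshot the current
+  * sample counter into memory (presumably via the RB_SAMPLE_COUNT_ADDR
+  * registers pointed at the 'begin' slot). */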
tu_cs_emit_regs(cs,
A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
uint64_t end_iova = occlusion_query_iova(pool, query, end);
uint64_t result_iova = occlusion_query_iova(pool, query, result);
- tu_cs_reserve_space(cs, 31);
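+ /* Seed 'end' with an all-ones sentinel; the ZPASS_DONE event then
+  * overwrites it with the real sample count, letting a CP_WAIT_REG_MEM
+  * detect when the asynchronous write has landed before end - begin is
+  * accumulated into 'result'. */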
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, end_iova);
tu_cs_emit_qw(cs, 0xffffffffffffffffull);
*/
cs = &cmdbuf->draw_epilogue_cs;
- tu_cs_reserve_space(cs, 5);
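+ /* Write available = 1 from the draw epilogue so it only becomes visible
+  * once the render pass, including all tile stores, has executed. */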
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, 0x1);