{
struct tu_physical_device *phys_dev = cmdbuf->device->physical_device;
- tu_cs_reserve_space(cmdbuf->device, cs, 66);
+ tu_cs_reserve_space(cs, 66);
enum a6xx_format fmt = tu6_get_native_format(blt->dst.fmt)->rb;
if (fmt == FMT6_Z24_UNORM_S8_UINT)
assert(blt->dst.samples == 1);
}
- tu_cs_reserve_space(cmdbuf->device, cs, 18);
+ tu_cs_reserve_space(cs, 18);
tu6_emit_event_write(cmdbuf, cs, LRZ_FLUSH, false);
tu6_emit_event_write(cmdbuf, cs, PC_CCU_FLUSH_COLOR_TS, true);
blt->src.ubwc_va += blt->src.ubwc_size;
}
- tu_cs_reserve_space(cmdbuf->device, cs, 17);
+ tu_cs_reserve_space(cs, 17);
tu6_emit_event_write(cmdbuf, cs, PC_CCU_FLUSH_COLOR_TS, true);
tu6_emit_event_write(cmdbuf, cs, PC_CCU_FLUSH_DEPTH_TS, true);
static void
tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
+ VkResult result = tu_cs_reserve_space(cs, 256);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
const struct tu_framebuffer *fb = cmd->state.framebuffer;
const uint32_t blit_cmd_space = 25 + 66 * fb->layers + 17;
const uint32_t clear_space =
- blit_cmd_space * cmd->state.pass->attachment_count + 5;
+ blit_cmd_space * cmd->state.pass->attachment_count + 5;
struct tu_cs sub_cs;
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
- clear_space, &sub_cs);
+ VkResult result =
+ tu_cs_begin_sub_stream(&cmd->sub_cs, clear_space, &sub_cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
/* TODO: We shouldn't need this flush, but without it we'd have an empty IB
* when nothing clears which we currently can't handle.
*/
- tu_cs_reserve_space(cmd->device, &sub_cs, 5);
+ tu_cs_reserve_space(&sub_cs, 5);
tu6_emit_event_write(cmd, &sub_cs, PC_CCU_FLUSH_COLOR_TS, true);
cmd->state.sysmem_clear_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
const struct VkRect2D *renderArea)
{
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
+ VkResult result = tu_cs_reserve_space(cs, 1024);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
}
const uint32_t space = 14 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
- VkResult result = tu_cs_reserve_space(cmd->device, cs, space);
+ VkResult result = tu_cs_reserve_space(cs, space);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 1024);
+ VkResult result = tu_cs_reserve_space(cs, 1024);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
const struct tu_tile *tile)
{
const uint32_t render_tile_space = 256 + tu_cs_get_call_size(&cmd->draw_cs);
- VkResult result = tu_cs_reserve_space(cmd->device, cs, render_tile_space);
+ VkResult result = tu_cs_reserve_space(cs, render_tile_space);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
const uint32_t space = 16 + tu_cs_get_call_size(&cmd->draw_epilogue_cs);
- VkResult result = tu_cs_reserve_space(cmd->device, cs, space);
+ VkResult result = tu_cs_reserve_space(cs, space);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
const uint32_t space = tu_cs_get_call_size(&cmd->draw_cs);
- VkResult result = tu_cs_reserve_space(cmd->device, &cmd->cs, space);
+ VkResult result = tu_cs_reserve_space(&cmd->cs, space);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
struct tu_cs sub_cs;
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
- tile_load_space, &sub_cs);
+ VkResult result =
+ tu_cs_begin_sub_stream(&cmd->sub_cs, tile_load_space, &sub_cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
struct tu_cs sub_cs;
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs,
- tile_store_space, &sub_cs);
+ VkResult result =
+ tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
}
tu_bo_list_init(&cmd_buffer->bo_list);
- tu_cs_init(&cmd_buffer->cs, TU_CS_MODE_GROW, 4096);
- tu_cs_init(&cmd_buffer->draw_cs, TU_CS_MODE_GROW, 4096);
- tu_cs_init(&cmd_buffer->draw_epilogue_cs, TU_CS_MODE_GROW, 4096);
- tu_cs_init(&cmd_buffer->sub_cs, TU_CS_MODE_SUB_STREAM, 2048);
+ tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
+ tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
+ tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
+ tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
*pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_cs);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->draw_epilogue_cs);
- tu_cs_finish(cmd_buffer->device, &cmd_buffer->sub_cs);
+ tu_cs_finish(&cmd_buffer->cs);
+ tu_cs_finish(&cmd_buffer->draw_cs);
+ tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
+ tu_cs_finish(&cmd_buffer->sub_cs);
tu_bo_list_destroy(&cmd_buffer->bo_list);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
cmd_buffer->record_result = VK_SUCCESS;
tu_bo_list_reset(&cmd_buffer->bo_list);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_cs);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->draw_epilogue_cs);
- tu_cs_reset(cmd_buffer->device, &cmd_buffer->sub_cs);
+ tu_cs_reset(&cmd_buffer->cs);
+ tu_cs_reset(&cmd_buffer->draw_cs);
+ tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
+ tu_cs_reset(&cmd_buffer->sub_cs);
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].valid = 0;
/* initialize/update the restart index */
if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 2);
+ VkResult result = tu_cs_reserve_space(draw_cs, 2);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 12);
+ VkResult result = tu_cs_reserve_space(draw_cs, 12);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 3);
+ VkResult result = tu_cs_reserve_space(draw_cs, 3);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 4);
+ VkResult result = tu_cs_reserve_space(draw_cs, 4);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *draw_cs = &cmd->draw_cs;
- VkResult result = tu_cs_reserve_space(cmd->device, draw_cs, 5);
+ VkResult result = tu_cs_reserve_space(draw_cs, 5);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
tu_cmd_prepare_tile_load_ib(cmd, pRenderPassBegin);
tu_cmd_prepare_tile_store_ib(cmd);
- VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
+ VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
}
}
- VkResult result = tu_cs_reserve_space(cmd->device, &cmd->draw_cs, 1024);
+ VkResult result = tu_cs_reserve_space(&cmd->draw_cs, 1024);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
gl_shader_stage type)
{
struct tu_cs cs;
- tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
+ tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
return VK_SUCCESS;
}
- VkResult result = tu_cs_begin_sub_stream(cmd->device, &cmd->sub_cs, 8, &cs);
+ VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
if (result != VK_SUCCESS)
return result;
bool *needs_border,
bool is_sysmem)
{
- struct tu_device *device = cmd->device;
struct tu_cs *draw_state = &cmd->sub_cs;
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
/* allocate and fill texture state */
struct ts_cs_memory tex_const;
- result = tu_cs_alloc(device, draw_state, link->texture_map.num_desc,
+ result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
A6XX_TEX_CONST_DWORDS, &tex_const);
if (result != VK_SUCCESS)
return result;
/* allocate and fill sampler state */
struct ts_cs_memory tex_samp = { 0 };
if (link->sampler_map.num_desc) {
- result = tu_cs_alloc(device, draw_state, link->sampler_map.num_desc,
+ result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
A6XX_TEX_SAMP_DWORDS, &tex_samp);
if (result != VK_SUCCESS)
return result;
}
struct tu_cs cs;
- result = tu_cs_begin_sub_stream(device, draw_state, 16, &cs);
+ result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
if (result != VK_SUCCESS)
return result;
gl_shader_stage type,
struct tu_cs_entry *entry)
{
- struct tu_device *device = cmd->device;
struct tu_cs *draw_state = &cmd->sub_cs;
const struct tu_program_descriptor_linkage *link =
&pipeline->program.link[type];
}
struct ts_cs_memory ibo_const;
- result = tu_cs_alloc(device, draw_state, num_desc,
+ result = tu_cs_alloc(draw_state, num_desc,
A6XX_TEX_CONST_DWORDS, &ibo_const);
if (result != VK_SUCCESS)
return result;
assert(ssbo_index == num_desc);
struct tu_cs cs;
- result = tu_cs_begin_sub_stream(device, draw_state, 7, &cs);
+ result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
if (result != VK_SUCCESS)
return result;
&pipeline->program.link[MESA_SHADER_FRAGMENT].sampler_map;
struct ts_cs_memory ptr;
- VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs,
+ VkResult result = tu_cs_alloc(&cmd->sub_cs,
vs_sampler->num_desc + fs_sampler->num_desc,
128 / 4,
&ptr);
struct tu_descriptor_state *descriptors_state =
&cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
+ VkResult result = tu_cs_reserve_space(cs, 256);
if (result != VK_SUCCESS)
return result;
return;
}
- result = tu_cs_reserve_space(cmd->device, cs, 32);
+ result = tu_cs_reserve_space(cs, 32);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
struct tu_descriptor_state *descriptors_state =
&cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 256);
+ VkResult result = tu_cs_reserve_space(cs, 256);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
{
struct tu_cs *cs = &cmd->cs;
- VkResult result = tu_cs_reserve_space(cmd->device, cs, 4);
+ VkResult result = tu_cs_reserve_space(cs, 4);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
struct tu_cs *cs = &cmd->cs;
- VkResult result = tu_cs_reserve_space(cmd->device, cs, eventCount * 7);
+ VkResult result = tu_cs_reserve_space(cs, eventCount * 7);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
* Initialize a command stream.
*/
void
-tu_cs_init(struct tu_cs *cs, enum tu_cs_mode mode, uint32_t initial_size)
+tu_cs_init(struct tu_cs *cs,
+ struct tu_device *device,
+ enum tu_cs_mode mode,
+ uint32_t initial_size)
{
assert(mode != TU_CS_MODE_EXTERNAL);
memset(cs, 0, sizeof(*cs));
+ cs->device = device;
cs->mode = mode;
cs->next_bo_size = initial_size;
}
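For context, a minimal caller sketch (hypothetical, not part of the patch, and assuming the usual turnip private headers): the device is captured once at init time, so the growth and teardown calls changed throughout this patch no longer need it.

static void
example_record_cs(struct tu_device *device)
{
   struct tu_cs cs;

   /* the device pointer is stored inside the cs here... */
   tu_cs_init(&cs, device, TU_CS_MODE_GROW, 4096);

   tu_cs_begin(&cs);
   /* ...so reserving space (which may allocate a new BO) needs no device */
   if (tu_cs_reserve_space(&cs, 4) == VK_SUCCESS) {
      /* emit up to 4 dwords here with tu_cs_emit_pkt7()/tu_cs_emit() */
   }
   tu_cs_end(&cs);

   /* teardown likewise pulls the device from cs->device */
   tu_cs_finish(&cs);
}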
* Finish and release all resources owned by a command stream.
*/
void
-tu_cs_finish(struct tu_device *dev, struct tu_cs *cs)
+tu_cs_finish(struct tu_cs *cs)
{
for (uint32_t i = 0; i < cs->bo_count; ++i) {
- tu_bo_finish(dev, cs->bos[i]);
+ tu_bo_finish(cs->device, cs->bos[i]);
free(cs->bos[i]);
}
* be emitted to the new BO.
*/
static VkResult
-tu_cs_add_bo(struct tu_device *dev, struct tu_cs *cs, uint32_t size)
+tu_cs_add_bo(struct tu_cs *cs, uint32_t size)
{
/* no BO for TU_CS_MODE_EXTERNAL */
assert(cs->mode != TU_CS_MODE_EXTERNAL);
if (!new_bo)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- VkResult result = tu_bo_init_new(dev, new_bo, size * sizeof(uint32_t));
+ VkResult result =
+ tu_bo_init_new(cs->device, new_bo, size * sizeof(uint32_t));
if (result != VK_SUCCESS) {
free(new_bo);
return result;
}
- result = tu_bo_map(dev, new_bo);
+ result = tu_bo_map(cs->device, new_bo);
if (result != VK_SUCCESS) {
- tu_bo_finish(dev, new_bo);
+ tu_bo_finish(cs->device, new_bo);
free(new_bo);
return result;
}
* emission.
*/
VkResult
-tu_cs_begin_sub_stream(struct tu_device *dev,
- struct tu_cs *cs,
- uint32_t size,
- struct tu_cs *sub_cs)
+tu_cs_begin_sub_stream(struct tu_cs *cs, uint32_t size, struct tu_cs *sub_cs)
{
assert(cs->mode == TU_CS_MODE_SUB_STREAM);
assert(size);
- VkResult result = tu_cs_reserve_space(dev, cs, size);
+ VkResult result = tu_cs_reserve_space(cs, size);
if (result != VK_SUCCESS)
return result;
tu_cs_init_external(sub_cs, cs->cur, cs->reserved_end);
tu_cs_begin(sub_cs);
- result = tu_cs_reserve_space(dev, sub_cs, size);
+ result = tu_cs_reserve_space(sub_cs, size);
assert(result == VK_SUCCESS);
return VK_SUCCESS;
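As a hypothetical usage sketch (the example_* names are invented; the tu_cs_* calls are the ones changed above): sub-streams are how the driver builds the small state IBs it references later, and after this patch the parent sub-stream cs supplies the device.

static VkResult
example_build_state_ib(struct tu_cs *sub_stream_cs, struct tu_cs_entry *ib)
{
   struct tu_cs cs;

   /* carve out room for up to 8 dwords from the parent sub-stream cs */
   VkResult result = tu_cs_begin_sub_stream(sub_stream_cs, 8, &cs);
   if (result != VK_SUCCESS)
      return result;

   /* emit at most 8 dwords of state packets into `cs` here */

   /* the returned entry records where the packets landed so they can be
    * called as an indirect buffer later */
   *ib = tu_cs_end_sub_stream(sub_stream_cs, &cs);
   return VK_SUCCESS;
}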
*
*/
VkResult
-tu_cs_alloc(struct tu_device *dev,
- struct tu_cs *cs,
+tu_cs_alloc(struct tu_cs *cs,
uint32_t count,
uint32_t size,
struct ts_cs_memory *memory)
/* TODO: smarter way to deal with alignment? */
- VkResult result = tu_cs_reserve_space(dev, cs, count * size + (size-1));
+ VkResult result = tu_cs_reserve_space(cs, count * size + (size-1));
if (result != VK_SUCCESS)
return result;
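A hypothetical sketch of the matching caller pattern for tu_cs_alloc(); the ts_cs_memory field names (map, iova) are assumptions here, inferred from how the descriptor code writes the allocation and then programs its GPU address.

static VkResult
example_fill_tex_descriptors(struct tu_cs *sub_stream_cs, uint32_t count)
{
   struct ts_cs_memory mem;

   /* `count` descriptors of A6XX_TEX_CONST_DWORDS dwords each; no device
    * argument is needed anymore */
   VkResult result =
      tu_cs_alloc(sub_stream_cs, count, A6XX_TEX_CONST_DWORDS, &mem);
   if (result != VK_SUCCESS)
      return result;

   /* fill the CPU mapping; the GPU address (mem.iova, assumed name) is what
    * later gets emitted into the texture state packets */
   for (uint32_t i = 0; i < count * A6XX_TEX_CONST_DWORDS; i++)
      mem.map[i] = 0;

   return VK_SUCCESS;
}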
* This never fails when \a cs has mode TU_CS_MODE_EXTERNAL.
*/
VkResult
-tu_cs_reserve_space(struct tu_device *dev,
- struct tu_cs *cs,
- uint32_t reserved_size)
+tu_cs_reserve_space(struct tu_cs *cs, uint32_t reserved_size)
{
if (tu_cs_get_space(cs) < reserved_size) {
if (cs->mode == TU_CS_MODE_EXTERNAL) {
/* switch to a new BO */
uint32_t new_size = MAX2(cs->next_bo_size, reserved_size);
- VkResult result = tu_cs_add_bo(dev, cs, new_size);
+ VkResult result = tu_cs_add_bo(cs, new_size);
if (result != VK_SUCCESS)
return result;
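The call-site pattern this signature change touches all over the driver looks roughly like the hypothetical helper below: reserve the worst-case dword count, latch any failure into cmd->record_result, then emit.

static void
example_emit_wait_mem_writes(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* worst case for the single packet emitted below */
   VkResult result = tu_cs_reserve_space(cs, 4);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   /* guaranteed to fit in the space reserved above */
   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
}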
* packets in \a cs, but does not necessarily release all resources.
*/
void
-tu_cs_reset(struct tu_device *dev, struct tu_cs *cs)
+tu_cs_reset(struct tu_cs *cs)
{
if (cs->mode == TU_CS_MODE_EXTERNAL) {
assert(!cs->bo_count && !cs->entry_count);
}
for (uint32_t i = 0; i + 1 < cs->bo_count; ++i) {
- tu_bo_finish(dev, cs->bos[i]);
+ tu_bo_finish(cs->device, cs->bos[i]);
free(cs->bos[i]);
}
#include "registers/adreno_pm4.xml.h"
void
-tu_cs_init(struct tu_cs *cs, enum tu_cs_mode mode, uint32_t initial_size);
+tu_cs_init(struct tu_cs *cs,
+ struct tu_device *device,
+ enum tu_cs_mode mode,
+ uint32_t initial_size);
void
tu_cs_init_external(struct tu_cs *cs, uint32_t *start, uint32_t *end);
void
-tu_cs_finish(struct tu_device *dev, struct tu_cs *cs);
+tu_cs_finish(struct tu_cs *cs);
void
tu_cs_begin(struct tu_cs *cs);
tu_cs_end(struct tu_cs *cs);
VkResult
-tu_cs_begin_sub_stream(struct tu_device *dev,
- struct tu_cs *cs,
- uint32_t size,
- struct tu_cs *sub_cs);
+tu_cs_begin_sub_stream(struct tu_cs *cs, uint32_t size, struct tu_cs *sub_cs);
VkResult
-tu_cs_alloc(struct tu_device *dev,
- struct tu_cs *cs,
+tu_cs_alloc(struct tu_cs *cs,
uint32_t count,
uint32_t size,
struct ts_cs_memory *memory);
tu_cs_end_sub_stream(struct tu_cs *cs, struct tu_cs *sub_cs);
VkResult
-tu_cs_reserve_space(struct tu_device *dev,
- struct tu_cs *cs,
- uint32_t reserved_size);
+tu_cs_reserve_space(struct tu_cs *cs, uint32_t reserved_size);
void
-tu_cs_reset(struct tu_device *dev, struct tu_cs *cs);
+tu_cs_reset(struct tu_cs *cs);
VkResult
tu_cs_add_entries(struct tu_cs *cs, struct tu_cs *target);
/* Reserve enough space so that both the condition packet and the actual
* condition will fit in the same IB.
*/
- VkResult result = tu_cs_reserve_space(dev, cs, max_dwords + 3);
+ VkResult result = tu_cs_reserve_space(cs, max_dwords + 3);
if (result != VK_SUCCESS)
return result;
tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
struct ts_cs_memory tmp;
- VkResult result = tu_cs_alloc(cmd->device, &cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64, &tmp);
+ VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64, &tmp);
if (result != VK_SUCCESS) {
cmd->record_result = result;
return;
if (!pipeline)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- tu_cs_init(&pipeline->cs, TU_CS_MODE_SUB_STREAM, 2048);
+ tu_cs_init(&pipeline->cs, dev, TU_CS_MODE_SUB_STREAM, 2048);
/* reserve the space now such that tu_cs_begin_sub_stream never fails */
- VkResult result = tu_cs_reserve_space(dev, &pipeline->cs, 2048);
+ VkResult result = tu_cs_reserve_space(&pipeline->cs, 2048);
if (result != VK_SUCCESS) {
vk_free2(&dev->alloc, pAllocator, pipeline);
return result;
struct tu_pipeline *pipeline)
{
struct tu_cs prog_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false);
pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true);
pipeline->program.binning_state_ib =
tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
struct tu_cs vi_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+ tu_cs_begin_sub_stream(&pipeline->cs,
MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
pipeline->vi.bindings, pipeline->vi.strides,
pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
if (vs->has_binning_pass) {
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+ tu_cs_begin_sub_stream(&pipeline->cs,
MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
tu6_emit_vertex_input(
&vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
builder->create_info->pViewportState;
struct tu_cs vp_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 15, &vp_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 15, &vp_cs);
if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
assert(vp_info->viewportCount == 1);
assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);
struct tu_cs rast_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 20, &rast_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 20, &rast_cs);
/* move to hw ctx init? */
tu6_emit_gras_unknowns(&rast_cs);
: &dummy_ds_info;
struct tu_cs ds_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 12, &ds_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 12, &ds_cs);
/* move to hw ctx init? */
tu6_emit_alpha_control_disable(&ds_cs);
: &dummy_blend_info;
struct tu_cs blend_cs;
- tu_cs_begin_sub_stream(builder->device, &pipeline->cs, MAX_RTS * 3 + 9,
- &blend_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, MAX_RTS * 3 + 9, &blend_cs);
uint32_t blend_enable_mask;
tu6_emit_rb_mrt_controls(&blend_cs, blend_info,
struct tu_device *dev,
const VkAllocationCallbacks *alloc)
{
- tu_cs_finish(dev, &pipeline->cs);
+ tu_cs_finish(&pipeline->cs);
if (pipeline->program.binary_bo.gem_handle)
tu_bo_finish(dev, &pipeline->program.binary_bo);
pipeline->compute.local_size[i] = v->shader->nir->info.cs.local_size[i];
struct tu_cs prog_cs;
- tu_cs_begin_sub_stream(dev, &pipeline->cs, 512, &prog_cs);
+ tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
tu6_emit_compute_program(&prog_cs, shader, &pipeline->program.binary_bo);
pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
uint32_t *reserved_end;
uint32_t *end;
+ struct tu_device *device;
enum tu_cs_mode mode;
uint32_t next_bo_size;
sizeof(uint64_t) : sizeof(uint32_t);
uint64_t write_iova = base_write_iova + (offset * element_size);
- tu_cs_reserve_space(cmdbuf->device, cs, 6);
+ tu_cs_reserve_space(cs, 6);
tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
CP_MEM_TO_MEM_0_DOUBLE : 0;
* To ensure that previous writes to the available bit are coherent, first
* wait for all writes to complete.
*/
- tu_cs_reserve_space(cmdbuf->device, cs, 1);
+ tu_cs_reserve_space(cs, 1);
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
for (uint32_t i = 0; i < queryCount; i++) {
/* Wait for the available bit to be set if executed with the
* VK_QUERY_RESULT_WAIT_BIT flag. */
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- tu_cs_reserve_space(cmdbuf->device, cs, 7);
+ tu_cs_reserve_space(cs, 7);
tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
CP_WAIT_REG_MEM_0_POLL_MEMORY);
* tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
* that 0 < available < 2, aka available == 1.
*/
- tu_cs_reserve_space(cmdbuf->device, cs, 7 + 6);
+ tu_cs_reserve_space(cs, 7 + 6);
tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, available_iova);
uint32_t query = firstQuery + i;
uint64_t available_iova = occlusion_query_iova(pool, query, available);
uint64_t result_iova = occlusion_query_iova(pool, query, result);
- tu_cs_reserve_space(cmdbuf->device, cs, 11);
+ tu_cs_reserve_space(cs, 11);
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, 0x0);
uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
- tu_cs_reserve_space(cmdbuf->device, cs, 7);
+ tu_cs_reserve_space(cs, 7);
tu_cs_emit_regs(cs,
A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
uint64_t end_iova = occlusion_query_iova(pool, query, end);
uint64_t result_iova = occlusion_query_iova(pool, query, result);
- tu_cs_reserve_space(cmdbuf->device, cs, 31);
+ tu_cs_reserve_space(cs, 31);
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, end_iova);
tu_cs_emit_qw(cs, 0xffffffffffffffffull);
*/
cs = &cmdbuf->draw_epilogue_cs;
- tu_cs_reserve_space(cmdbuf->device, cs, 5);
+ tu_cs_reserve_space(cs, 5);
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, available_iova);
tu_cs_emit_qw(cs, 0x1);