switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
- cmd_buffer->state.compute_pipeline = pipeline;
+ cmd_buffer->state.compute.base.pipeline = pipeline;
cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
break;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
- cmd_buffer->state.pipeline = pipeline;
+ cmd_buffer->state.gfx.base.pipeline = pipeline;
cmd_buffer->state.vb_dirty |= pipeline->vb_used;
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
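
For context, a minimal sketch of the application-side calls that reach the switch above; cmd_buf, gfx_pipe, and compute_pipe are placeholder handles created elsewhere with the usual Vulkan entry points, not part of this patch.

/* Sketch: binding a pipeline at each bind point exercises the two cases
 * in anv_CmdBindPipeline above. */
vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, gfx_pipe);
vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipe);
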
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+
/* If we don't have this stage, bail. */
- if (!anv_pipeline_has_stage(cmd_buffer->state.pipeline, stage))
+ if (!anv_pipeline_has_stage(pipeline, stage))
return (struct anv_state) { .offset = 0 };
struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage];
const struct brw_stage_prog_data *prog_data =
- cmd_buffer->state.pipeline->shaders[stage]->prog_data;
+ pipeline->shaders[stage]->prog_data;
/* If we don't actually have any push constants, bail. */
if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
{
struct anv_push_constants *data =
cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
bool clear_color_is_zero;
};
+/** State tracking for a particular pipeline bind point
+ *
+ * This struct is the base struct for anv_cmd_graphics_state and
+ * anv_cmd_compute_state. These are used to track state which is bound to a
+ * particular type of pipeline. Generic state that applies per-stage, such
+ * as binding table offsets and push constants, is tracked with a per-stage
+ * array in anv_cmd_state.
+ */
+struct anv_cmd_pipeline_state {
+ struct anv_pipeline *pipeline;
+};
+
+/** State tracking for a graphics pipeline
+ *
+ * This has anv_cmd_pipeline_state as a base struct to track things which get
+ * bound to a graphics pipeline. Along with the general bind point state in
+ * the anv_cmd_pipeline_state base struct, it also contains state which is
+ * graphics-specific.
+ */
+struct anv_cmd_graphics_state {
+ struct anv_cmd_pipeline_state base;
+};
+
+/** State tracking for a compute pipeline
+ *
+ * This has anv_cmd_pipeline_state as a base struct to track things which get
+ * bound to a compute pipeline. Along with the general bind point state in
+ * the anv_cmd_pipeline_state base struct, it also contains state which is
+ * compute-specific.
+ */
+struct anv_cmd_compute_state {
+ struct anv_cmd_pipeline_state base;
+};
+
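
To illustrate the base-struct pattern described above, here is a minimal sketch (not part of this patch) of a helper that resolves the per-bind-point state; the function name is hypothetical, and unreachable() is assumed to come from Mesa's util/macros.h.

/* Hypothetical helper, not in this patch: map a Vulkan bind point to the
 * corresponding anv_cmd_pipeline_state, so callers can handle graphics and
 * compute uniformly through the shared base struct. */
static inline struct anv_cmd_pipeline_state *
anv_cmd_buffer_bind_point_state(struct anv_cmd_buffer *cmd_buffer,
                                VkPipelineBindPoint bind_point)
{
   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      return &cmd_buffer->state.gfx.base;
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      return &cmd_buffer->state.compute.base;
   default:
      unreachable("unsupported pipeline bind point");
   }
}

With such a helper, the switch in anv_CmdBindPipeline and the per-stage selection in the binding-table and sampler paths below could share one lookup.
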
/** State required while building cmd buffer */
struct anv_cmd_state {
/* PIPELINE_SELECT.PipelineSelection */
uint32_t current_pipeline;
const struct gen_l3_config * current_l3_config;
+
+ struct anv_cmd_graphics_state gfx;
+ struct anv_cmd_compute_state compute;
+
uint32_t vb_dirty;
anv_cmd_dirty_mask_t dirty;
anv_cmd_dirty_mask_t compute_dirty;
struct anv_bo *num_workgroups_bo;
VkShaderStageFlags descriptors_dirty;
VkShaderStageFlags push_constants_dirty;
- struct anv_pipeline * pipeline;
- struct anv_pipeline * compute_pipeline;
+
struct anv_framebuffer * framebuffer;
struct anv_render_pass * pass;
struct anv_subpass * subpass;
static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
- VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;
+ VkShaderStageFlags stages =
+ cmd_buffer->state.gfx.base.pipeline->active_stages;
/* In order to avoid thrash, we assume that vertex and fragment stages
* always exist. In the rare case where one is missing *and* the other
struct anv_state *bt_state)
{
struct anv_subpass *subpass = cmd_buffer->state.subpass;
+ struct anv_cmd_pipeline_state *pipe_state;
struct anv_pipeline *pipeline;
uint32_t bias, state_offset;
switch (stage) {
case MESA_SHADER_COMPUTE:
- pipeline = cmd_buffer->state.compute_pipeline;
+ pipe_state = &cmd_buffer->state.compute.base;
bias = 1;
break;
default:
- pipeline = cmd_buffer->state.pipeline;
+ pipe_state = &cmd_buffer->state.gfx.base;
bias = 0;
break;
}
+ pipeline = pipe_state->pipeline;
if (!anv_pipeline_has_stage(pipeline, stage)) {
*bt_state = (struct anv_state) { 0, };
gl_shader_stage stage,
struct anv_state *state)
{
- struct anv_pipeline *pipeline;
-
- if (stage == MESA_SHADER_COMPUTE)
- pipeline = cmd_buffer->state.compute_pipeline;
- else
- pipeline = cmd_buffer->state.pipeline;
+ struct anv_cmd_pipeline_state *pipe_state =
+ stage == MESA_SHADER_COMPUTE ? &cmd_buffer->state.compute.base :
+ &cmd_buffer->state.gfx.base;
+ struct anv_pipeline *pipeline = pipe_state->pipeline;
if (!anv_pipeline_has_stage(pipeline, stage)) {
*state = (struct anv_state) { 0, };
static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+
VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
- cmd_buffer->state.pipeline->active_stages;
+ pipeline->active_stages;
VkResult result = VK_SUCCESS;
anv_foreach_stage(s, dirty) {
genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
/* Re-emit all active binding tables */
- dirty |= cmd_buffer->state.pipeline->active_stages;
+ dirty |= pipeline->active_stages;
anv_foreach_stage(s, dirty) {
result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
if (result != VK_SUCCESS) {
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
VkShaderStageFlags dirty_stages)
{
- const struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ const struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
static const uint32_t push_constant_opcodes[] = {
[MESA_SHADER_VERTEX] = 21,
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
uint32_t *p;
uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
uint32_t firstInstance)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
uint32_t firstInstance)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
struct anv_state surfaces = { 0, }, samplers = { 0, };
VkResult result;
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
MAYBE_UNUSED VkResult result;
assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
uint32_t z)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
if (anv_batch_has_error(&cmd_buffer->batch))
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;