/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
32 #include "genxml/gen_macros.h"
33 #include "genxml/genX_pack.h"
static void
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer0 = { .offset = state.offset },
                        .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;
}
#if GEN_GEN == 7 && !GEN_IS_HASWELL
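/* Emit 3DSTATE_SAMPLER_STATE_POINTERS_XS (when a stage has samplers) and
 * 3DSTATE_BINDING_TABLE_POINTERS_XS for each stage in `stages`, again by
 * patching the per-stage sub-opcode into the VS variant of the packet.
 */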
void
gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                         VkShaderStageFlags stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 43,
      [MESA_SHADER_TESS_CTRL] = 44, /* HS */
      [MESA_SHADER_TESS_EVAL] = 45, /* DS */
      [MESA_SHADER_GEOMETRY]  = 46,
      [MESA_SHADER_FRAGMENT]  = 47,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 38,
      [MESA_SHADER_TESS_CTRL] = 39,
      [MESA_SHADER_TESS_EVAL] = 40,
      [MESA_SHADER_GEOMETRY]  = 41,
      [MESA_SHADER_FRAGMENT]  = 42,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
                     ._3DCommandSubOpcode = binding_table_opcodes[s],
                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
   }
}
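
/* Emit samplers and binding tables for every dirty active stage.  If the
 * current binding table block runs out of space, grab a new block, re-emit
 * state base addresses, and re-emit everything.  Returns the mask of stages
 * that were actually (re-)emitted.
 */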
uint32_t
gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
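
/* The scissor math below is done in int64_t so that out-of-range rectangles
 * clamp instead of wrapping; this helper implements the clamp.
 */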
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
#if GEN_GEN == 7 && !GEN_IS_HASWELL
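/* Pack an array of SCISSOR_RECTs into dynamic state and point the hardware
 * at it with 3DSTATE_SCISSOR_STATE_POINTERS.
 */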
static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}
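
/* Emit the application-provided scissors when any are set; otherwise fall
 * back to a single scissor covering the whole bound framebuffer.
 */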
void
gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}
#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
};

static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
};
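
/* Vulkan entrypoint.  We only stash the buffer, format, and offset in
 * gen7-specific state here; the actual 3DSTATE_INDEX_BUFFER is emitted at
 * flush time.  Only Haswell has a programmable restart index, so that is
 * all we record for it.
 */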
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
   if (GEN_IS_HASWELL)
      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
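
/* Set up all GPGPU descriptor state: samplers, the binding table, push
 * constants (CURBE), and the INTERFACE_DESCRIPTOR_DATA that ties them
 * together for MEDIA_INTERFACE_DESCRIPTOR_LOAD.
 */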
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
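
/* Flush all dirty compute state: switch to the GPGPU pipeline if needed,
 * replay the pipeline's batch, and rebuild the interface descriptor.
 */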
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
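
/* Flush all dirty 3D state before a draw: vertex buffers, the pipeline
 * batch, descriptors, push constants, and the dynamic state packets.
 */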
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
            .InstanceDataStepRate = 1
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         gen7_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthStallEnable = true,
                     .PostSyncOperation = WriteImmediateData,
                     .Address = { &cmd_buffer->device->workaround_bo, 0 });
   }

   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty) {
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   }

   if (cmd_buffer->state.push_constants_dirty)
      cmd_buffer_flush_push_constants(cmd_buffer);
   /* We use the gen8 state here because it only contains the additional
    * min/max fields and, since they occur at the end of the packet and
    * don't change the stride, they work on gen7 too.
    */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);
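
   /* 3DSTATE_SF mixes pipeline state with dynamic line width and depth
    * bias, so we pack it here and merge the result over the DWords the
    * pipeline precomputed.
    */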
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
                         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
      const struct anv_image *image = iview ? iview->image : NULL;
      const struct anv_format *anv_format =
         iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
      const bool has_depth = iview && anv_format->has_depth;
      const uint32_t depth_format = has_depth ?
         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
                                   &image->depth_surface.isl) : D16_UNORM;

      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
         .DepthBufferSurfaceFormat = depth_format,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
         .StencilBufferWriteEnable = iview && (iview->aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT),

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GENX(DEPTH_STENCIL_STATE_length), 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
   }
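
   /* Haswell has a programmable restart index (3DSTATE_VF); Ivybridge only
    * has a cut-index enable in 3DSTATE_INDEX_BUFFER with a value fixed to
    * the maximum for the index type.
    */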
   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

#if GEN_IS_HASWELL
      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
#endif

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
#if !GEN_IS_HASWELL
                     .CutIndexEnable = pipeline->primitive_restart,
#endif
                     .IndexFormat = cmd_buffer->state.gen7.index_type,
                     .MemoryObjectControlState = GENX(MOCS),
                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
   }
   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
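
/* VkEvent support is still a stub on gen7. */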
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   stub();
}