/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
32 #include "genxml/gen7_pack.h"
33 #include "genxml/gen75_pack.h"

static void
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 21,
      [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 22,
      [MESA_SHADER_FRAGMENT]                    = 23,
      [MESA_SHADER_COMPUTE]                     = 0,
   };
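   /* These are the per-stage _3DCommandSubOpcode values for the
    * 3DSTATE_CONSTANT_* packets (0x15 for VS through 0x1a for DS).  Emitting
    * GEN7_3DSTATE_CONSTANT_VS with the sub-opcode overridden below reuses a
    * single pack template for every graphics stage.
    */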

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_CONSTANT_VS,
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer0 = { .offset = state.offset },
                        .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;
}

GENX_FUNC(GEN7, GEN7) void
genX(cmd_buffer_emit_descriptor_pointers)(struct anv_cmd_buffer *cmd_buffer,
                                          VkShaderStageFlags stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 43,
      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 46,
      [MESA_SHADER_FRAGMENT]                    = 47,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 38,
      [MESA_SHADER_TESS_CTRL]                   = 39,
      [MESA_SHADER_TESS_EVAL]                   = 40,
      [MESA_SHADER_GEOMETRY]                    = 41,
      [MESA_SHADER_FRAGMENT]                    = 42,
      [MESA_SHADER_COMPUTE]                     = 0,
   };
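   /* As above, these are _3DCommandSubOpcode values: 38-42 select the
    * per-stage 3DSTATE_BINDING_TABLE_POINTERS_* packets and 43-47 the
    * 3DSTATE_SAMPLER_STATE_POINTERS_* packets.
    */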

   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                        ._3DCommandSubOpcode = sampler_state_opcodes[s],
                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode = binding_table_opcodes[s],
                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
   }
}

GENX_FUNC(GEN7, GEN7) uint32_t
genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
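
/* The scissor math in emit_scissor_state below is done in int64_t via this
 * helper so that negative offsets and oversized extents clamp instead of
 * wrapping.
 */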
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want.  Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}

GENX_FUNC(GEN7, GEN7) void
genX(cmd_buffer_emit_scissor)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}

static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
};

static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32]                       = UINT32_MAX,
};
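
/* Vulkan fixes the primitive restart index at the all-ones value for the
 * bound index type, so the CutIndex programmed at draw time comes straight
 * from this table.
 */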

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
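
/* vkCmdBindIndexBuffer only records state; the 3DSTATE_INDEX_BUFFER (and,
 * on Haswell, 3DSTATE_VF) packets are emitted by
 * genX(cmd_buffer_flush_state) at draw time.
 */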

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;
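   /* Worked example: nr_params == 7 with no local-invocation-ID regs is 28
    * bytes of uniforms; ALIGN(28, 32) == 32, so the CURBE reads exactly one
    * 32-byte push constant register.
    */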

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }
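
   /* For example, total_shared == 5000 rounds up to the next power of two,
    * 8192, so slm_size ends up as 2 (8k in 4k increments) for the
    * SharedLocalMemorySize field below.
    */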

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GEN7_INTERFACE_DESCRIPTOR_DATA, 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadLength =
                             push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   const uint32_t size = GEN7_INTERFACE_DESCRIPTOR_DATA_length * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPELINE_SELECT,
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

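      /* 3DSTATE_VERTEX_BUFFERS is a single header dword followed by a
       * 4-dword VERTEX_BUFFER_STATE per buffer, which is where num_dwords
       * and the &p[1 + i * 4] packing destination below come from.
       */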
      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN7_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GEN7_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .VertexBufferMemoryObjectControlState = GEN7_MOCS,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
            .InstanceDataStepRate = 1
         };

         GEN7_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         gen7_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
                     .DepthStallEnable = true,
                     .PostSyncOperation = WriteImmediateData,
                     .Address = { &cmd_buffer->device->workaround_bo, 0 });
   }

   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty) {
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   }

   if (cmd_buffer->state.push_constants_dirty)
      cmd_buffer_flush_push_constants(cmd_buffer);

   /* We use the gen8 state here because it only contains the additional
    * min/max fields and, since they occur at the end of the packet and
    * don't change the stride, they work on gen7 too.
    */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {

      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
      const struct anv_image *image = iview ? iview->image : NULL;
      const uint32_t depth_format = image ?
         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
                                   &image->depth_surface.isl) : D16_UNORM;

      uint32_t sf_dw[GEN7_3DSTATE_SF_length];
      struct GEN7_3DSTATE_SF sf = {
         GEN7_3DSTATE_SF_header,
         .DepthBufferSurfaceFormat = depth_format,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GEN7_3DSTATE_SF_pack(NULL, sf_dw, &sf);
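
      /* anv_batch_emit_merge ORs these dynamic dwords with the partial
       * 3DSTATE_SF packet the pipeline precomputed, so static and dynamic
       * fields end up in a single packet.
       */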
      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN7_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN7_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GEN7_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GEN7_DEPTH_STENCIL_STATE_length];

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

      struct GEN7_DEPTH_STENCIL_STATE depth_stencil = {
         .StencilBufferWriteEnable = iview && (iview->aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT),

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GEN7_DEPTH_STENCIL_STATE_pack(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GEN7_DEPTH_STENCIL_STATE_length, 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
   }

   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

      if (ANV_IS_HASWELL) {
         anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
                        .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                        .CutIndex = cmd_buffer->state.restart_index);
      }
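
      /* Ivybridge's 3DSTATE_INDEX_BUFFER only has a CutIndexEnable bit, with
       * the cut index itself fixed at the all-ones value for the index
       * format; Haswell makes it programmable via the 3DSTATE_VF packet
       * emitted above.
       */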

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_INDEX_BUFFER,
                     .CutIndexEnable = pipeline->primitive_restart,
                     .IndexFormat = cmd_buffer->state.gen7.index_type,
                     .MemoryObjectControlState = GEN7_MOCS,
                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
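
/* VkEvent support is assumed to be unimplemented on gen7 at this point; the
 * stub() bodies below reflect that assumption rather than a confirmed
 * implementation.
 */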
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   stub();
}