/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "gen7_pack.h"
static void
gen7_cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 21,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 25, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 26, /* DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 22,
      [VK_SHADER_STAGE_FRAGMENT]        = 23,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };
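   /* The table above holds the _3DCommandSubOpcode for each stage's
    * 3DSTATE_CONSTANT_* packet; the GEN7_3DSTATE_CONSTANT_VS template is
    * reused for every stage with only the sub-opcode swapped.
    */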
   VkShaderStage stage;
   VkShaderStageFlags flushed = 0;

   for_each_bit(stage, cmd_buffer->state.push_constants_dirty) {
      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_CONSTANT_VS,
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer0 = { .offset = state.offset },
                        .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= 1 << stage;
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;
}
void
gen7_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_bo *scratch_bo = NULL;

   cmd_buffer->state.scratch_size =
      anv_block_pool_size(&device->scratch_block_pool);
   if (cmd_buffer->state.scratch_size > 0)
      scratch_bo = &device->scratch_block_pool.bo;
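   /* The general state base address is pointed at the scratch BO (when one
    * exists) so that scratch-space offsets programmed for the shader stages
    * resolve relative to it.
    */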
   anv_batch_emit(&cmd_buffer->batch, GEN7_STATE_BASE_ADDRESS,
                  .GeneralStateBaseAddress = { scratch_bo, 0 },
                  .GeneralStateMemoryObjectControlState = GEN7_MOCS,
                  .GeneralStateBaseAddressModifyEnable = true,
                  .GeneralStateAccessUpperBound = { scratch_bo, scratch_bo->size },
                  .GeneralStateAccessUpperBoundModifyEnable = true,

                  .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
                  .SurfaceStateMemoryObjectControlState = GEN7_MOCS,
                  .SurfaceStateBaseAddressModifyEnable = true,

                  .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
                  .DynamicStateMemoryObjectControlState = GEN7_MOCS,
                  .DynamicStateBaseAddressModifyEnable = true,
                  .DynamicStateAccessUpperBound = { &device->dynamic_state_block_pool.bo,
                                                    device->dynamic_state_block_pool.bo.size },
                  .DynamicStateAccessUpperBoundModifyEnable = true,

                  .IndirectObjectBaseAddress = { NULL, 0 },
                  .IndirectObjectMemoryObjectControlState = GEN7_MOCS,
                  .IndirectObjectBaseAddressModifyEnable = true,

                  .IndirectObjectAccessUpperBound = { NULL, 0xffffffff },
                  .IndirectObjectAccessUpperBoundModifyEnable = true,

                  .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
                  .InstructionMemoryObjectControlState = GEN7_MOCS,
                  .InstructionBaseAddressModifyEnable = true,
                  .InstructionAccessUpperBound = { &device->instruction_block_pool.bo,
                                                   device->instruction_block_pool.bo.size },
                  .InstructionAccessUpperBoundModifyEnable = true);
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * same PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be sufficient.
    * The theory here is that all of the sampling/rendering units cache the
    * binding table in the texture cache.  However, we have yet to be able
    * to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
                  .TextureCacheInvalidationEnable = true);
}
static VkResult
flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, VkShaderStage stage)
{
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer, stage, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer, stage, &surfaces);
   if (result != VK_SUCCESS)
      return result;
   static const uint32_t sampler_state_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 43,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 44, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 46,
      [VK_SHADER_STAGE_FRAGMENT]        = 47,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 38,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY]        = 41,
      [VK_SHADER_STAGE_FRAGMENT]        = 42,
      [VK_SHADER_STAGE_COMPUTE]         = 0,
   };
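   /* As with push constants, the _VS packet layouts are reused for every
    * stage: the tables above give the _3DCommandSubOpcode values that
    * select 3DSTATE_SAMPLER_STATE_POINTERS_{VS..PS} and
    * 3DSTATE_BINDING_TABLE_POINTERS_{VS..PS}.
    */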
   if (samplers.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                     ._3DCommandSubOpcode = sampler_state_opcodes[stage],
                     .PointertoVSSamplerState = samplers.offset);
   }

   if (surfaces.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode = binding_table_opcodes[stage],
                     .PointertoVSBindingTable = surfaces.offset);
   }

   return VK_SUCCESS;
}
void
gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStage s;
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   for_each_bit(s, dirty) {
      result = flush_descriptor_set(cmd_buffer, s);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);
      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      for_each_bit(s, cmd_buffer->state.pipeline->active_stages) {
         result = flush_descriptor_set(cmd_buffer, s);

         /* It had better succeed this time */
         assert(result == VK_SUCCESS);
      }
   }
   cmd_buffer->state.descriptors_dirty &= ~cmd_buffer->state.pipeline->active_stages;
}
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}
static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 32, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];
      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width and height
       * are all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax,
       * which isn't what we want.  Just special-case empty clips and
       * produce a canonical empty clip.
       */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };
      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax =
            clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax =
            clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };
      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32, &scissor);
      }
   }
   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);
}
void
gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}
static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
};
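/* INDEX_WORD and INDEX_DWORD are the hardware IndexFormat encodings for
 * 16- and 32-bit indices; the chosen value is stashed in the gen7 command
 * buffer state and programmed into 3DSTATE_INDEX_BUFFER at flush time.
 */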
void gen7_CmdBindIndexBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
static VkResult
gen7_flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         VK_SHADER_STAGE_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              VK_SHADER_STAGE_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;
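   /* The interface descriptor is the GPGPU analog of the per-stage pointer
    * packets: it hands the kernel entry point, binding table, and sampler
    * state to the compute hardware via MEDIA_INTERFACE_DESCRIPTOR_LOAD
    * below.
    */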
   struct GEN7_INTERFACE_DESCRIPTOR_DATA desc = {
      .KernelStartPointer = pipeline->cs_simd,
      .BindingTablePointer = surfaces.offset,
      .SamplerStatePointer = samplers.offset,
      .NumberofThreadsinGPGPUThreadGroup = 0 /* FIXME: Really? */
   };

   uint32_t size = GEN7_INTERFACE_DESCRIPTOR_DATA_length * sizeof(uint32_t);
   struct anv_state state =
      anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);

   GEN7_INTERFACE_DESCRIPTOR_DATA_pack(NULL, state.map, &desc);

   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
static void
gen7_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPELINE_SELECT,
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }
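   /* PIPELINE_SELECT flips the fixed-function pipe between 3D and GPGPU
    * mode; current_pipeline is tracked in the command buffer state so the
    * switch is only emitted when the mode actually changes.
    */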
   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = gen7_flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}
static void
gen7_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
   if (cmd_buffer->state.current_pipeline != _3D) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPELINE_SELECT,
                     .PipelineSelection = _3D);
      cmd_buffer->state.current_pipeline = _3D;
   }
   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN7_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GEN7_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .BufferAccessType = pipeline->instancing_enable[vb] ?
                                INSTANCEDATA : VERTEXDATA,
            .VertexBufferMemoryObjectControlState = GEN7_MOCS,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1 },
            .InstanceDataStepRate = 1
         };

         GEN7_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo.
       */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         gen7_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }
   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
                     .DepthStallEnable = true,
                     .PostSyncOperation = WriteImmediateData,
                     .Address = { &cmd_buffer->device->workaround_bo, 0 });
   }
   if (cmd_buffer->state.descriptors_dirty)
      gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      gen7_cmd_buffer_flush_push_constants(cmd_buffer);
   /* We use the gen8 state here because it only contains the additional
    * min/max fields and, since they occur at the end of the packet and
    * don't change the stride, they work on gen7 too.
    */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
                         cmd_buffer->state.dynamic.depth_bias.slope_scaled != 0.0f;

      uint32_t sf_dw[GEN7_3DSTATE_SF_length];
      struct GEN7_3DSTATE_SF sf = {
         GEN7_3DSTATE_SF_header,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope_scaled,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GEN7_3DSTATE_SF_pack(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
   }
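   /* anv_batch_emit_merge ORs the dynamic-state dwords packed above into
    * the partial 3DSTATE_SF packet the pipeline pre-packed at creation
    * time, so static and dynamic fields end up in a single packet.
    */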
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN7_COLOR_CALC_STATE_length, 64);
      struct GEN7_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GEN7_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset);
   }
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GEN7_DEPTH_STENCIL_STATE_length];

      struct GEN7_DEPTH_STENCIL_STATE depth_stencil = {
         /* Is this what we need to do? */
         .StencilBufferWriteEnable =
            cmd_buffer->state.dynamic.stencil_write_mask.front != 0,

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GEN7_DEPTH_STENCIL_STATE_pack(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GEN7_DEPTH_STENCIL_STATE_length, 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
   }
   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_INDEX_BUFFER,
                     .CutIndexEnable = pipeline->primitive_restart,
                     .IndexFormat = cmd_buffer->state.gen7.index_type,
                     .MemoryObjectControlState = GEN7_MOCS,
                     .BufferStartingAddress = { buffer->bo,
                                                buffer->offset + offset },
                     .BufferEndingAddress = { buffer->bo,
                                              buffer->offset + buffer->size });
   }
   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
void gen7_CmdDraw(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   gen7_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .VertexAccessType = SEQUENTIAL,
                  .PrimitiveTopologyType = pipeline->topology,
                  .VertexCountPerInstance = vertexCount,
                  .StartVertexLocation = firstVertex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = 0);
}
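/* VertexAccessType distinguishes non-indexed (SEQUENTIAL) from indexed
 * (RANDOM) draws; gen7_CmdDrawIndexed below differs only in that bit, in
 * taking its start location from the index stream, and in passing the
 * application's vertexOffset as BaseVertexLocation.
 */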
void gen7_CmdDrawIndexed(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   gen7_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .VertexAccessType = RANDOM,
                  .PrimitiveTopologyType = pipeline->topology,
                  .VertexCountPerInstance = indexCount,
                  .StartVertexLocation = firstIndex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = vertexOffset);
}
static void
gen7_batch_lrm(struct anv_batch *batch,
               uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN7_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
}
static void
gen7_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GEN7_MI_LOAD_REGISTER_IMM,
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}
/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
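/* gen7_CmdDrawIndirect below relies on VkDrawIndirectCommand's layout:
 * vertexCount, instanceCount, firstVertex, and firstInstance sit at byte
 * offsets 0, 4, 8, and 12, which is why the MI_LOAD_REGISTER_MEMs walk
 * the buffer in 4-byte steps.
 */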
void gen7_CmdDrawIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   gen7_cmd_buffer_flush_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   gen7_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = SEQUENTIAL,
                  .PrimitiveTopologyType = pipeline->topology);
}
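/* The indexed variant reads a VkDrawIndexedIndirectCommand instead, where
 * vertexOffset lives at byte offset 12 and firstInstance at 16; hence the
 * 3DPRIM_BASE_VERTEX and 3DPRIM_START_INSTANCE loads below are shifted
 * relative to the non-indexed path.
 */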
void gen7_CmdDrawIndexedIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   gen7_cmd_buffer_flush_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = RANDOM,
                  .PrimitiveTopologyType = pipeline->topology);
}
void gen7_CmdDispatch(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;

   gen7_cmd_buffer_flush_compute_state(cmd_buffer);
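   /* GPGPU_WALKER's SIMDSize field encodes SIMD8/16/32 as 0/1/2, which is
    * what simd_size / 16 computes (8/16 -> 0, 16/16 -> 1, 32/16 -> 2).
    */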
   anv_batch_emit(&cmd_buffer->batch, GEN7_GPGPU_WALKER,
                  .SIMDSize = prog_data->simd_size / 16,
                  .ThreadDepthCounterMaximum = 0,
                  .ThreadHeightCounterMaximum = 0,
                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max,
                  .ThreadGroupIDXDimension = x,
                  .ThreadGroupIDYDimension = y,
                  .ThreadGroupIDZDimension = z,
                  .RightExecutionMask = pipeline->cs_right_mask,
                  .BottomExecutionMask = 0xffffffff);

   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_STATE_FLUSH);
}
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508
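/* VkDispatchIndirectCommand is three tightly packed uint32_t values
 * (x, y, z), matching the three register loads in gen7_CmdDispatchIndirect
 * below.
 */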
void gen7_CmdDispatchIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   gen7_cmd_buffer_flush_compute_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);

   anv_batch_emit(&cmd_buffer->batch, GEN7_GPGPU_WALKER,
                  .IndirectParameterEnable = true,
                  .SIMDSize = prog_data->simd_size / 16,
                  .ThreadDepthCounterMaximum = 0,
                  .ThreadHeightCounterMaximum = 0,
                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max,
                  .RightExecutionMask = pipeline->cs_right_mask,
                  .BottomExecutionMask = 0xffffffff);

   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_STATE_FLUSH);
}
void gen7_CmdPipelineBarrier(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}
static void
gen7_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;
   const bool has_depth = iview && iview->format->depth_format;
   const bool has_stencil = iview && iview->format->has_stencil;
   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_DEPTH_BUFFER,
                     .SurfaceType = SURFTYPE_2D,
                     .DepthWriteEnable = iview->format->depth_format,
                     .StencilWriteEnable = has_stencil,
                     .HierarchicalDepthBufferEnable = false,
                     .SurfaceFormat = iview->format->depth_format,
                     .SurfacePitch = image->depth_surface.stride - 1,
                     .SurfaceBaseAddress = {
                        .bo = image->bo,
                        .offset = image->depth_surface.offset,
                     },
                     .Height = fb->height - 1,
                     .Width = fb->width - 1,
                     .MinimumArrayElement = 0,
                     .DepthBufferObjectControlState = GEN7_MOCS,
                     .RenderTargetViewExtent = 1 - 1);
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly.  The Broadwell PRM
       * says:
       *
       *    If a null depth buffer is bound, the driver must instead bind
       *    depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though.  The width and height must be programmed
       * to the actual framebuffer's width and height, even when neither
       * depth buffer nor stencil buffer is present.
       */
      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_DEPTH_BUFFER,
                     .SurfaceType = SURFTYPE_2D,
                     .SurfaceFormat = D16_UNORM,
                     .Width = fb->width - 1,
                     .Height = fb->height - 1,
                     .StencilWriteEnable = has_stencil);
   }
   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_STENCIL_BUFFER,
                     .StencilBufferObjectControlState = GEN7_MOCS,

                     /* Stencil buffers have strange pitch.  The PRM says:
                      *
                      *    The pitch must be set to 2x the value computed
                      *    based on width, as the stencil buffer is stored
                      *    with two rows interleaved.
                      */
                     .SurfacePitch = 2 * image->stencil_surface.stride - 1,

                     .SurfaceBaseAddress = {
                        .bo = image->bo,
                        .offset = image->offset + image->stencil_surface.offset,
                     });
   } else {
      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_STENCIL_BUFFER);
   }
   /* Disable hierarchical depth buffers. */
   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_HIER_DEPTH_BUFFER);

   /* Clear the clear params. */
   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_CLEAR_PARAMS);
}
void
gen7_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;
   cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;

   gen7_cmd_buffer_emit_depth_stencil(cmd_buffer);
}
static void
begin_render_pass(struct anv_cmd_buffer *cmd_buffer,
                  const VkRenderPassBeginInfo* pRenderPassBegin)
{
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;

   const VkRect2D *render_area = &pRenderPassBegin->renderArea;

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_DRAWING_RECTANGLE,
                  .ClippedDrawingRectangleYMin = render_area->offset.y,
                  .ClippedDrawingRectangleXMin = render_area->offset.x,
                  .ClippedDrawingRectangleYMax =
                     render_area->offset.y + render_area->extent.height - 1,
                  .ClippedDrawingRectangleXMax =
                     render_area->offset.x + render_area->extent.width - 1,
                  .DrawingRectangleOriginY = 0,
                  .DrawingRectangleOriginX = 0);

   anv_cmd_buffer_clear_attachments(cmd_buffer, pass,
                                    pRenderPassBegin->pClearValues);
}
void gen7_CmdBeginRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkRenderPassContents                        contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);

   begin_render_pass(cmd_buffer, pRenderPassBegin);

   gen7_cmd_buffer_begin_subpass(cmd_buffer, pass->subpasses);
}
void gen7_CmdNextSubpass(
    VkCmdBuffer                                 cmdBuffer,
    VkRenderPassContents                        contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

   gen7_cmd_buffer_begin_subpass(cmd_buffer, cmd_buffer->state.subpass + 1);
}
void gen7_CmdEndRenderPass(
    VkCmdBuffer                                 cmdBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   /* Emit a flushing pipe control at the end of a pass.  This is kind of a
    * hack but it ensures that render targets always actually get written.
    * Eventually, we should do flushing based on image format transitions
    * or something of that nature.
    */
   anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
                  .PostSyncOperation = NoWrite,
                  .RenderTargetCacheFlushEnable = true,
                  .InstructionCacheInvalidateEnable = true,
                  .DepthCacheFlushEnable = true,
                  .VFCacheInvalidationEnable = true,
                  .TextureCacheInvalidationEnable = true,
                  .CommandStreamerStallEnable = true);
}