/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"
static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset   = reg;
      lri.DataDWord        = imm;
   }
}
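
/* Added commentary: these two helpers wrap MI_LOAD_REGISTER_MEM and
 * MI_LOAD_REGISTER_IMM.  As a usage sketch (see genX(CmdDrawIndirect) further
 * below), the indirect draw path uses emit_lrm() to load the draw parameters
 * from the indirect buffer into the 3DPRIM_* registers and emit_lri() to
 * clear 3DPRIM_BASE_VERTEX with an immediate zero.
 */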
void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* XXX: Do we need this on more than just BDW? */
#if (GEN_GEN >= 8)
   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable = true;
   }
#endif
   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;
#if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields.  However, since we will be growing the BOs live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize                = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSize                = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable    = true;
      sba.IndirectObjectBufferSize              = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBufferSize                 = 0xfffff;
      sba.InstructionBuffersizeModifyEnable     = true;
#endif
   }
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX:  As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
   }
}
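
/* Added cross-reference: flush_descriptor_sets() further down re-emits this
 * STATE_BASE_ADDRESS sequence whenever a new binding table block has to be
 * allocated, so the surface state base address always matches the block that
 * the binding table offsets are relative to.
 */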
VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state.  Otherwise, we must *reset* its state.  In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags &
            VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return VK_SUCCESS;
}
VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}
void
genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* Each of the secondary command buffers will use its own state base
    * address.  We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address calls.
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}
#define IVB_L3SQCREG1_SQGHPCI_DEFAULT  0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT  0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT  0x00610000
/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      fprintf(stderr, "L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }
   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be changed
    * while the pipeline is completely drained and the caches are flushed,
    * which involves a first PIPE_CONTROL flush which stalls the pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }
   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches.  Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes.  This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }
   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }
#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else
   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled, SLM only uses a portion of the L3 on half of the banks;
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = (devinfo->is_baytrail ? 32 : 0);
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);
   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;
   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
#if !GEN_IS_HASWELL
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);
   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .ISLowBandwidth = 0,
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .CLowBandwidth = 0,
                   .TAllocation = cfg->n[GEN_L3P_T],
                   .TLowBandwidth = 0);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);
#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif /* GEN_GEN >= 8 */
   cmd_buffer->state.current_l3_config = cfg;
}
void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }
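
   /* Added example (derived from the CmdPipelineBarrier mapping below): a
    * barrier from a color-attachment write to a sampled-image read sets both
    * ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT and
    * ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT.  The fix-up above then forces a
    * CS stall into the flushing PIPE_CONTROL so the invalidation emitted in
    * the second PIPE_CONTROL cannot race ahead of the flush.
    */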
   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with the
          * "Command Streamer Stall" bit set must also have another bit set,
          * with five different options:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine.  The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }
   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}
void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   uint32_t b;

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for.  One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;
   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
   }
   enum anv_pipe_bits pipe_bits = 0;

   for_each_bit(b, src_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }
   for_each_bit(b, dst_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   cmd_buffer->state.pending_pipe_bits |= pipe_bits;
}
static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist.  In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal.  However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif
   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB.  Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;
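
   /* Illustrative arithmetic (added commentary, not from the original
    * source): with all five graphics stages active on a 32KB platform,
    * size_per_stage = 32 / 5 = 6KB, already a multiple of 2KB.  The loop
    * below then hands 6KB each to VS..GS (24KB total) and the fragment
    * stage picks up the remaining 8KB via 3DSTATE_PUSH_CONSTANT_ALLOC_PS.
    */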
   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;
   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo,
                        uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */

   const uint32_t dword = GEN_GEN < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
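
/* Worked example (added commentary, not from the original source): on gen7
 * the relocation is written at state.offset + 1 * 4 = 4 bytes into the
 * surface state, i.e. dword 1; on gen8+ it lands at state.offset + 32 bytes,
 * the low half of the 64-bit address held in dwords 8-9.
 */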
static struct anv_state
alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                         struct anv_framebuffer *fb)
{
   struct anv_state state =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);

   struct GENX(RENDER_SURFACE_STATE) null_ss = {
      .SurfaceType = SURFTYPE_NULL,
      .SurfaceArray = fb->layers > 0,
      .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
      .TileMode = YMAJOR,
#else
      .TiledSurface = true,
#endif
      .Width = fb->width - 1,
      .Height = fb->height - 1,
      .Depth = fb->layers - 1,
      .RenderTargetViewExtent = fb->layers - 1,
   };

   GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                   gl_shader_stage stage,
                   struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         assert(binding->binding == 0);
         if (binding->index < subpass->color_count) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[binding->index]];

            assert(iview->color_rt_surface_state.alloc_size);
            surface_state = iview->color_rt_surface_state;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         } else {
            /* Null render target */
            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
            surface_state = alloc_null_surface_state(cmd_buffer, fb);
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }
      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
static VkResult
emit_samplers(struct anv_cmd_buffer *cmd_buffer,
              gl_shader_stage stage,
              struct anv_state *state)
{
   struct anv_pipeline *pipeline;

   if (stage == MESA_SHADER_COMPUTE)
      pipeline = cmd_buffer->state.compute_pipeline;
   else
      pipeline = cmd_buffer->state.pipeline;

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = emit_binding_table(cmd_buffer, s,
                                  &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = emit_binding_table(cmd_buffer, s,
                                     &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
static void
cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 43,
      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 46,
      [MESA_SHADER_FRAGMENT]                    = 47,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 38,
      [MESA_SHADER_TESS_CTRL]                   = 39,
      [MESA_SHADER_TESS_EVAL]                   = 40,
      [MESA_SHADER_GEOMETRY]                    = 41,
      [MESA_SHADER_FRAGMENT]                    = 42,
      [MESA_SHADER_COMPUTE]                     = 0,
   };
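
   /* Added note: the packets for the non-VS stages are identical to the *_VS
    * forms except for the sub-opcode, so the loop below emits the VS packet
    * layout and patches _3DCommandSubOpcode from the tables above (e.g. 38
    * selects 3DSTATE_BINDING_TABLE_POINTERS_VS and 42 the PS variant).
    */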
   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
         }
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
         btp._3DCommandSubOpcode = binding_table_opcodes[s];
         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
      }
   }
}
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 21,
      [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 22,
      [MESA_SHADER_FRAGMENT]                    = 23,
      [MESA_SHADER_COMPUTE]                     = 0,
   };
= 0;
999 anv_foreach_stage(stage
, cmd_buffer
->state
.push_constants_dirty
) {
1000 if (stage
== MESA_SHADER_COMPUTE
)
1003 struct anv_state state
= anv_cmd_buffer_push_constants(cmd_buffer
, stage
);
1005 if (state
.offset
== 0) {
1006 anv_batch_emit(&cmd_buffer
->batch
, GENX(3DSTATE_CONSTANT_VS
), c
)
1007 c
._3DCommandSubOpcode
= push_constant_opcodes
[stage
];
1009 anv_batch_emit(&cmd_buffer
->batch
, GENX(3DSTATE_CONSTANT_VS
), c
) {
1010 c
._3DCommandSubOpcode
= push_constant_opcodes
[stage
],
1011 c
.ConstantBody
= (struct GENX(3DSTATE_CONSTANT_BODY
)) {
1013 .PointerToConstantBuffer2
= { &cmd_buffer
->device
->dynamic_state_block_pool
.bo
, state
.offset
},
1014 .ConstantBuffer2ReadLength
= DIV_ROUND_UP(state
.alloc_size
, 32),
1016 .PointerToConstantBuffer0
= { .offset
= state
.offset
},
1017 .ConstantBuffer0ReadLength
= DIV_ROUND_UP(state
.alloc_size
, 32),
1023 flushed
|= mesa_to_vk_shader_stage(stage
);
1026 cmd_buffer
->state
.push_constants_dirty
&= ~VK_SHADER_STAGE_ALL_GRAPHICS
;
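
/* Added note: the ConstantBuffer*ReadLength fields above appear to be in
 * units of 32 bytes (one 256-bit register), which is why the allocated
 * push-constant size is divided by 32 with DIV_ROUND_UP; e.g. a 68-byte
 * push-constant block would be programmed as a read length of 3.
 */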
void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_3d)(cmd_buffer);
   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,

#if GEN_GEN >= 8
            .MemoryObjectControlState = GENX(MOCS),
#else
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .InstanceDataStepRate = 1,
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif

            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },

#if GEN_GEN >= 8
            .BufferSize = buffer->size - offset
#else
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
#endif
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* The exact descriptor layout is pulled from the pipeline, so we need
       * to re-emit binding tables on every pipeline change.
       */
      cmd_buffer->state.descriptors_dirty |=
         cmd_buffer->state.pipeline->active_stages;

      /* If the pipeline changed, we may need to re-allocate push constant
       * space in the URB.
       */
      cmd_buffer_alloc_push_constants(cmd_buffer);
   }
#if GEN_GEN <= 7
   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthStallEnable  = true;
         pc.PostSyncOperation = WriteImmediateData;
         pc.Address           =
            (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
      }
   }
#endif
   /* Render targets live in the same binding table as fragment descriptors */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty) {
#if GEN_GEN >= 9
      /* On Sky Lake and later, the binding table pointers commands are
       * what actually flush the changes to push constant state so we need
       * to dirty them so they get re-emitted below.
       */
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
#else
      cmd_buffer_flush_push_constants(cmd_buffer);
#endif
   }

   if (dirty)
      cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
                                  ANV_CMD_DIRTY_PIPELINE)) {
      gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
                                          pipeline->depth_clamp_enable);
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_bo *bo, uint32_t offset)
{
   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
                                 GENX(3DSTATE_VERTEX_BUFFERS));

   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferPitch = 0,
#if GEN_GEN >= 8
         .MemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .BufferSize = 8
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .EndAddress = { bo, offset + 8 },
#endif
      });
}
static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
                          uint32_t base_vertex, uint32_t base_instance)
{
   struct anv_state id_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);

   ((uint32_t *)id_state.map)[0] = base_vertex;
   ((uint32_t *)id_state.map)[1] = base_instance;

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(id_state);

   emit_base_vertex_instance_bo(cmd_buffer,
      &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
}
void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = vertexCount;
      prim.StartVertexLocation      = firstVertex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = 0;
   }
}
void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = indexCount;
      prim.StartVertexLocation      = firstIndex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = vertexOffset;
   }
}
/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
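
/* Added note: the loads in genX(CmdDrawIndirect) below follow the
 * VkDrawIndirectCommand layout: vertexCount at byte 0, instanceCount at
 * byte 4, firstVertex at byte 8 and firstInstance at byte 12 of the indirect
 * buffer, each loaded into the matching 3DPRIM register with emit_lrm().
 */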
void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}
void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   /* TODO: We need to stomp base vertex to 0 somehow */
   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };

   VkResult result;
   result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength    = push_state.alloc_size;
         curbe.CURBEDataStartAddress   = push_state.offset;
      }
   }

   const uint32_t slm_size = encode_slm_size(GEN_GEN, prog_data->total_shared);

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadOffset = 0,
                          .ConstantURBEntryReadLength =
                             cs_prog_data->push.per_thread.regs,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
                          .CrossThreadConstantDataReadLength =
                             cs_prog_data->push.cross_thread.regs,
#endif
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             cs_prog_data->threads);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
      mid.InterfaceDescriptorTotalLength        = size;
      mid.InterfaceDescriptorDataStartAddress   = state.offset;
   }

   return VK_SUCCESS;
}
void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
static bool
verify_cmd_parser(const struct anv_device *device,
                  int required_version,
                  const char *function)
{
   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
      vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
                "cmd parser version %d is required for %s",
                required_version, function);
      return false;
   } else {
      return true;
   }
}
void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

   if (prog_data->uses_num_work_groups) {
      struct anv_state state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
      uint32_t *sizes = state.map;
      sizes[0] = x;
      sizes[1] = y;
      sizes[2] = z;
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(state);
      cmd_buffer->state.num_workgroups_offset = state.offset;
      cmd_buffer->state.num_workgroups_bo =
         &cmd_buffer->device->dynamic_state_block_pool.bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension      = x;
      ggw.ThreadGroupIDYDimension      = y;
      ggw.ThreadGroupIDZDimension      = z;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408
void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;
   struct anv_batch *batch = &cmd_buffer->batch;

   /* Linux 4.4 added command parser version 5 which allows the GPGPU
    * indirect dispatch registers to be written.
    */
   if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
      return;

   if (prog_data->uses_num_work_groups) {
      cmd_buffer->state.num_workgroups_offset = bo_offset;
      cmd_buffer->state.num_workgroups_bo = bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE 1
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif
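
   /* Added summary: per the in-line comments above, after the three loads
    * the predicate holds (x == 0) | (y == 0) | (z == 0), and the final
    * packet inverts it.  On gens where PredicateEnable is set below, the
    * GPGPU_WALKER is therefore skipped whenever any dispatch dimension is
    * zero.
    */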
   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = true;
      ggw.PredicateEnable              = GEN_GEN <= 7;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
}
static void
flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
                                      uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *   Software must ensure all the write caches are flushed through a
    *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *   command to invalidate read only caches prior to programming
    *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable  = true;
      pc.DepthCacheFlushEnable         = true;
      pc.DCFlushEnable                 = true;
      pc.PostSyncOperation             = NoWrite;
      pc.CommandStreamerStallEnable    = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable   = true;
      pc.ConstantCacheInvalidationEnable  = true;
      pc.StateCacheInvalidationEnable     = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.PostSyncOperation                = NoWrite;
   }
}
void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != _3D) {
      flush_pipeline_before_pipeline_select(cmd_buffer, _3D);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
         ps.PipelineSelection = _3D;
      }

      cmd_buffer->state.current_pipeline = _3D;
   }
}
void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != GPGPU) {
      flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
         ps.PipelineSelection = GPGPU;
      }

      cmd_buffer->state.current_pipeline = GPGPU;
   }
}
static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;
   const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
   const bool has_hiz = image != NULL && anv_image_has_hiz(image);
   const bool has_stencil =
      image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
   /* FIXME: Implement the PMA stall W/A */
   /* FIXME: Width and Height are wrong */

   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType                   = SURFTYPE_2D;
         db.DepthWriteEnable              = true;
         db.StencilWriteEnable            = has_stencil;

         if (cmd_buffer->state.pass->subpass_count == 1) {
            db.HierarchicalDepthBufferEnable = has_hiz;
         } else {
            anv_finishme("Multiple-subpass HiZ not implemented");
         }

         db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
                                                      &image->depth_surface.isl);

         db.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->depth_surface.offset,
         };
         db.DepthBufferObjectControlState = GENX(MOCS);

         db.SurfacePitch         = image->depth_surface.isl.row_pitch - 1;
         db.Height               = image->extent.height - 1;
         db.Width                = image->extent.width - 1;
         db.LOD                  = iview->isl.base_level;
         db.Depth                = image->array_size - 1; /* FIXME: 3-D */
         db.MinimumArrayElement  = iview->isl.base_array_layer;

#if GEN_GEN >= 8
         db.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
#endif
         db.RenderTargetViewExtent = 1 - 1;
      }
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly.  The Broadwell PRM says:
       *
       *    If a null depth buffer is bound, the driver must instead bind depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though.  The width and height must be programmed to
       * the actual framebuffer's width and height, even when neither depth
       * buffer nor stencil buffer is present.  Also, D16_UNORM is not allowed
       * to be combined with a stencil buffer so we use D32_FLOAT instead.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType          = SURFTYPE_2D;
         db.SurfaceFormat        = D32_FLOAT;
         db.Width                = fb->width - 1;
         db.Height               = fb->height - 1;
         db.StencilWriteEnable   = has_stencil;
      }
   }
   if (has_hiz) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) {
         hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS);
         hdb.SurfacePitch = image->hiz_surface.isl.row_pitch - 1;
         hdb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->hiz_surface.offset,
         };
#if GEN_GEN >= 8
         /* From the SKL PRM Vol2a:
          *
          *    The interpretation of this field is dependent on Surface Type
          *    as follows:
          *    - SURFTYPE_1D: distance in pixels between array slices
          *    - SURFTYPE_2D/CUBE: distance in rows between array slices
          *    - SURFTYPE_3D: distance in rows between R - slices
          */
         hdb.SurfaceQPitch =
            image->hiz_surface.isl.dim == ISL_SURF_DIM_1D ?
               isl_surf_get_array_pitch_el(&image->hiz_surface.isl) >> 2 :
               isl_surf_get_array_pitch_el_rows(&image->hiz_surface.isl) >> 2;
#endif
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb);
   }

   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         sb.StencilBufferEnable = true;
#endif
         sb.StencilBufferObjectControlState = GENX(MOCS);

         sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1;

#if GEN_GEN >= 8
         sb.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
#endif
         sb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->stencil_surface.offset,
         };
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
   }

   /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS:
    *
    *    3DSTATE_CLEAR_PARAMS must always be programmed in the along with
    *    the other Depth/Stencil state commands(i.e. 3DSTATE_DEPTH_BUFFER,
    *    3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER)
    *
    * Testing also shows that some variant of this restriction may exist HSW+.
    * On BDW+, it is not possible to emit 2 of these packets consecutively when
    * both have DepthClearValueValid set. An analysis of such state programming
    * on SKL showed that the GPU doesn't register the latter packet's clear
    * value.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
      if (has_hiz) {
         cp.DepthClearValueValid = true;
         const uint32_t ds =
            cmd_buffer->state.subpass->depth_stencil_attachment;
         cp.DepthClearValue =
            cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth;
      }
   }
}
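
/* Subpass transitions re-emit the depth/stencil buffer state above and then
 * run the HiZ resolve/clear operations and the attachment clears for the new
 * subpass.
 */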
void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;

   cmd_buffer_emit_depth_stencil(cmd_buffer);
   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_HIZ_RESOLVE);
   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_CLEAR);

   anv_cmd_buffer_clear_subpass(cmd_buffer);
}
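
/* vkCmdBeginRenderPass / vkCmdNextSubpass / vkCmdEndRenderPass entry points.
 * Begin stores the framebuffer, render pass and render area on the command
 * buffer state, sets up the per-attachment clear state, forces the 3D
 * pipeline, and enters the first subpass.
 */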
void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
   anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
}
void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);
   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
}
void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_RESOLVE);
   anv_cmd_buffer_resolve_subpass(cmd_buffer);

   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
}
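
/* Query support.  emit_ps_depth_count() uses a PIPE_CONTROL with the
 * WritePSDepthCount post-sync operation to dump the pixel-pipe depth count
 * into the query pool BO, and emit_query_availability() writes an immediate
 * 1 to mark the slot as available.
 */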
static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WritePSDepthCount;
      pc.DepthStallEnable = true;
      pc.Address = (struct anv_address) { bo, offset };

      if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
         pc.CommandStreamerStallEnable = true;
   }
}
static void
emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address = (struct anv_address) { bo, offset };
      pc.ImmediateData = 1;
   }
}
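
/* The offsets used below imply a three-field anv_query_pool_slot layout:
 * the begin value at +0, the end value at +8 and the availability word at
 * +16, with slots spaced sizeof(struct anv_query_pool_slot) bytes apart.
 */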
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthCacheFlushEnable = true;
         pc.DepthStallEnable = true;
      }
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(cmd_buffer, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = TIMESTAMP;
         srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = TIMESTAMP + 4;
         srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
      }
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DestinationAddressType = DAT_PPGTT;
         pc.PostSyncOperation = WriteTimestamp;
         pc.Address = (struct anv_address) { &pool->bo, offset };

         if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
            pc.CommandStreamerStallEnable = true;
      }
      break;
   }

   emit_query_availability(cmd_buffer, &pool->bo, query + 16);
}
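
/* Copying query results on the GPU uses the command streamer's general
 * purpose registers (CS_GPR) and the MI_MATH ALU.  The macros below pack a
 * single ALU instruction dword: the opcode lives in bits 31:20 and the two
 * operands in bits 19:10 and 9:0.
 */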
#if GEN_GEN > 7 || GEN_IS_HASWELL

#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
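
/* For example, the occlusion path in CmdCopyQueryPoolResults below builds a
 * four-instruction MI_MATH program with these macros:
 *
 *    dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);   srca <- R1 (end)
 *    dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);   srcb <- R0 (begin)
 *    dw[3] = alu(OPCODE_SUB, 0, 0);                        accu <- srca - srcb
 *    dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);  R2   <- accu
 *
 * leaving end - begin in CS_GPR(2).
 */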
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = (struct anv_address) { bo, offset };
   }
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg + 4;
      lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
   }
}
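
/* The CS GPRs are 64 bits wide (CS_GPR advances by 8 bytes per register) but
 * MI_LOAD/STORE_REGISTER_MEM move one dword at a time, so 64-bit values are
 * handled as a low/high pair of dwords; store_query_result() below only
 * writes the high dword when the caller asked for 64-bit results.
 */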
static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = (struct anv_address) { bo, offset };
   }

   if (flags & VK_QUERY_RESULT_64_BIT) {
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = reg + 4;
         srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
      }
   }
}
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.CommandStreamerStallEnable = true;
         pc.StallAtPixelScoreboard = true;
      }
   }

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}
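
/* Ivy Bridge's command streamer has no MI_MATH, so the GPU-side copy above is
 * not usable there and the entry point is stubbed out for now.
 */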
#else

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   anv_finishme("Queries not yet supported on Ivy Bridge");
}

#endif