/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

#if (ANV_GEN == 8)
#  include "gen8_pack.h"
#elif (ANV_IS_HASWELL)
#  include "gen75_pack.h"
#else
#  include "gen7_pack.h"
#endif
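/* genX() and GENX() are token-pasting helpers that prefix identifiers with
 * the generation being compiled (gen7/gen75/gen8), so this one source file
 * can be built once per hardware generation against the matching pack
 * header included above.
 */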
void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_bo *scratch_bo = NULL;

   cmd_buffer->state.scratch_size =
      anv_block_pool_size(&device->scratch_block_pool);
   if (cmd_buffer->state.scratch_size > 0)
      scratch_bo = &device->scratch_block_pool.bo;
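   /* A note on the emit helpers used below: anv_batch_emit() is a macro
    * that builds a GENX command template from the designated initializers
    * following the command name and packs it straight into the command
    * buffer's batch.  (A rough description; see the macro's definition in
    * anv_private.h for the authoritative behavior.)
    */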
   /* XXX: Do we need this on more than just BDW? */
#if (ANV_GEN >= 8)
   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                  .RenderTargetCacheFlushEnable = true);
#endif
   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
      .GeneralStateBaseAddress = { scratch_bo, 0 },
      .GeneralStateMemoryObjectControlState = GENX(MOCS),
      .GeneralStateBaseAddressModifyEnable = true,

      .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
      .SurfaceStateMemoryObjectControlState = GENX(MOCS),
      .SurfaceStateBaseAddressModifyEnable = true,

      .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
      .DynamicStateMemoryObjectControlState = GENX(MOCS),
      .DynamicStateBaseAddressModifyEnable = true,

      .IndirectObjectBaseAddress = { NULL, 0 },
      .IndirectObjectMemoryObjectControlState = GENX(MOCS),
      .IndirectObjectBaseAddressModifyEnable = true,

      .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
      .InstructionMemoryObjectControlState = GENX(MOCS),
      .InstructionBaseAddressModifyEnable = true,

#if (ANV_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields.  However, since we will be growing the BO's live, we
       * just set them all to the maximum.
       */
      .GeneralStateBufferSize = 0xfffff,
      .GeneralStateBufferSizeModifyEnable = true,
      .DynamicStateBufferSize = 0xfffff,
      .DynamicStateBufferSizeModifyEnable = true,
      .IndirectObjectBufferSize = 0xfffff,
      .IndirectObjectBufferSizeModifyEnable = true,
      .InstructionBufferSize = 0xfffff,
      .InstructionBuffersizeModifyEnable = true,
#endif
   );
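   /* The 0xfffff values above appear to be the maximum these 20-bit
    * buffer-size fields can encode (the sizes are expressed in 4 KB pages
    * per the BDW PRM), which effectively disables bounds checking on the
    * base addresses.
    */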
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever with
    * regard to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .TextureCacheInvalidationEnable = true);
}
void genX(CmdPipelineBarrier)(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   uint32_t b, *dw;

   struct GENX(PIPE_CONTROL) cmd = {
      GENX(PIPE_CONTROL_header),
      .PostSyncOperation = NoWrite,
   };
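   /* The strategy here is to accumulate every flush and invalidate bit
    * implied by the barriers into this one PIPE_CONTROL template, then emit
    * it as a single command at the end of the function.
    *
    * anv_clear_mask() is defined elsewhere in the driver; its apparent
    * contract is to clear the given bits from *mask and return whether any
    * of them were set, roughly:
    *
    *    static inline bool
    *    anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
    *    {
    *       bool ret = (*inout_mask & clear_mask) != 0;
    *       *inout_mask &= ~clear_mask;
    *       return ret;
    *    }
    *
    * (a sketch, not the authoritative definition), which is what lets the
    * anv_assert(srcStageMask == 0) below verify that every stage bit was
    * handled.
    */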
   /* XXX: I think waitEvent is a no-op on our HW.  We should verify that. */

   if (anv_clear_mask(&srcStageMask, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)) {
      /* This is just what PIPE_CONTROL does */
   }

   if (anv_clear_mask(&srcStageMask,
                      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
                      VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                      VK_PIPELINE_STAGE_TESS_CONTROL_SHADER_BIT |
                      VK_PIPELINE_STAGE_TESS_EVALUATION_SHADER_BIT |
                      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
                      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) {
      cmd.StallAtPixelScoreboard = true;
   }

   if (anv_clear_mask(&srcStageMask,
                      VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
                      VK_PIPELINE_STAGE_TRANSFER_BIT)) {
      cmd.CommandStreamerStallEnable = true;
   }

   if (anv_clear_mask(&srcStageMask, VK_PIPELINE_STAGE_HOST_BIT)) {
      anv_finishme("VK_PIPE_EVENT_CPU_SIGNAL_BIT");
   }

   if (anv_clear_mask(&srcStageMask, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)) {
      /* On our hardware, all stages will wait for execution as needed. */
   }

   /* We checked all known VkPipeEventFlags. */
   anv_assert(srcStageMask == 0);
   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for.  One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkMemoryOutputFlags out_flags = 0;
   VkMemoryInputFlags in_flags = 0;

   for (uint32_t i = 0; i < memBarrierCount; i++) {
      const struct anv_common *common = ppMemBarriers[i];
      switch (common->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_BARRIER: {
         ANV_COMMON_TO_STRUCT(VkMemoryBarrier, barrier, common);
         out_flags |= barrier->outputMask;
         in_flags |= barrier->inputMask;
         break;
      }
      case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER: {
         ANV_COMMON_TO_STRUCT(VkBufferMemoryBarrier, barrier, common);
         out_flags |= barrier->outputMask;
         in_flags |= barrier->inputMask;
         break;
      }
      case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER: {
         ANV_COMMON_TO_STRUCT(VkImageMemoryBarrier, barrier, common);
         out_flags |= barrier->outputMask;
         in_flags |= barrier->inputMask;
         break;
      }
      default:
         unreachable("Invalid memory barrier type");
      }
   }
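   /* Translate the accumulated Vulkan memory-output flags (writes to make
    * visible) into PIPE_CONTROL cache-flush bits and, in the loop after
    * this one, the memory-input flags (reads to make coherent) into
    * cache-invalidation bits.
    */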
   for_each_bit(b, out_flags) {
      switch ((VkMemoryOutputFlags)(1 << b)) {
      case VK_MEMORY_OUTPUT_HOST_WRITE_BIT:
         break; /* FIXME: Little-core systems */
      case VK_MEMORY_OUTPUT_SHADER_WRITE_BIT:
         cmd.DCFlushEnable = true;
         break;
      case VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT:
         cmd.RenderTargetCacheFlushEnable = true;
         break;
      case VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT:
         cmd.DepthCacheFlushEnable = true;
         break;
      case VK_MEMORY_OUTPUT_TRANSFER_BIT:
         cmd.RenderTargetCacheFlushEnable = true;
         cmd.DepthCacheFlushEnable = true;
         break;
      default:
         unreachable("Invalid memory output flag");
      }
   }
   for_each_bit(b, in_flags) {
      switch ((VkMemoryInputFlags)(1 << b)) {
      case VK_MEMORY_INPUT_HOST_READ_BIT:
         break; /* FIXME: Little-core systems */
      case VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT:
      case VK_MEMORY_INPUT_INDEX_FETCH_BIT:
      case VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT:
         cmd.VFCacheInvalidationEnable = true;
         break;
      case VK_MEMORY_INPUT_UNIFORM_READ_BIT:
         cmd.ConstantCacheInvalidationEnable = true;
         break;
      case VK_MEMORY_INPUT_SHADER_READ_BIT:
         cmd.DCFlushEnable = true;
         cmd.TextureCacheInvalidationEnable = true;
         break;
      case VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT:
      case VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT:
         break; /* XXX: Hunh? */
      case VK_MEMORY_INPUT_TRANSFER_BIT:
         cmd.TextureCacheInvalidationEnable = true;
         break;
      }
   }
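   /* Reserve space in the batch and pack the accumulated PIPE_CONTROL as a
    * single command covering everything the barriers requested.
    */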
   dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
   GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &cmd);
}