anv/gen7: Add stall and flushes before switching pipelines
[mesa.git] / src / intel / vulkan / genX_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
53 .RenderTargetCacheFlushEnable = true);
54 #endif
55
56 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
57 .GeneralStateBaseAddress = { scratch_bo, 0 },
58 .GeneralStateMemoryObjectControlState = GENX(MOCS),
59 .GeneralStateBaseAddressModifyEnable = true,
60
61 .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
62 .SurfaceStateMemoryObjectControlState = GENX(MOCS),
63 .SurfaceStateBaseAddressModifyEnable = true,
64
65 .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
66 .DynamicStateMemoryObjectControlState = GENX(MOCS),
67 .DynamicStateBaseAddressModifyEnable = true,
68
69 .IndirectObjectBaseAddress = { NULL, 0 },
70 .IndirectObjectMemoryObjectControlState = GENX(MOCS),
71 .IndirectObjectBaseAddressModifyEnable = true,
72
73 .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
74 .InstructionMemoryObjectControlState = GENX(MOCS),
75 .InstructionBaseAddressModifyEnable = true,
76
77 # if (GEN_GEN >= 8)
78 /* Broadwell requires that we specify a buffer size for a bunch of
79 * these fields. However, since we will be growing the BOs live, we
80 * just set them all to the maximum.
81 */
82 .GeneralStateBufferSize = 0xfffff,
83 .GeneralStateBufferSizeModifyEnable = true,
84 .DynamicStateBufferSize = 0xfffff,
85 .DynamicStateBufferSizeModifyEnable = true,
86 .IndirectObjectBufferSize = 0xfffff,
87 .IndirectObjectBufferSizeModifyEnable = true,
88 .InstructionBufferSize = 0xfffff,
89 .InstructionBuffersizeModifyEnable = true,
90 # endif
91 );
92
93 /* After re-setting the surface state base address, we have to do some
94 * cache flushing so that the sampler engine will pick up the new
95 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
96 * Shared Function > 3D Sampler > State > State Caching (page 96):
97 *
98 * Coherency with system memory in the state cache, like the texture
99 * cache is handled partially by software. It is expected that the
100 * command stream or shader will issue Cache Flush operation or
101 * Cache_Flush sampler message to ensure that the L1 cache remains
102 * coherent with system memory.
103 *
104 * [...]
105 *
106 * Whenever the value of the Dynamic_State_Base_Addr,
107 * Surface_State_Base_Addr are altered, the L1 state cache must be
108 * invalidated to ensure the new surface or sampler state is fetched
109 * from system memory.
110 *
111 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
112 * which, according to the PIPE_CONTROL instruction documentation in the
113 * Broadwell PRM:
114 *
115 * Setting this bit is independent of any other bit in this packet.
116 * This bit controls the invalidation of the L1 and L2 state caches
117 * at the top of the pipe i.e. at the parsing time.
118 *
119 * Unfortunately, experimentation seems to indicate that state cache
120 * invalidation through a PIPE_CONTROL does nothing whatsoever in
121 * regard to surface state and binding tables. Instead, it seems that
122 * invalidating the texture cache is what is actually needed.
123 *
124 * XXX: As far as we have been able to determine through
125 * experimentation, flushing the texture cache appears to be
126 * sufficient. The theory here is that all of the sampling/rendering
127 * units cache the binding table in the texture cache. However, we have
128 * yet to be able to actually confirm this.
129 */
130 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
131 .TextureCacheInvalidationEnable = true);
132 }
133
134 void genX(CmdPipelineBarrier)(
135 VkCommandBuffer commandBuffer,
136 VkPipelineStageFlags srcStageMask,
137 VkPipelineStageFlags destStageMask,
138 VkBool32 byRegion,
139 uint32_t memoryBarrierCount,
140 const VkMemoryBarrier* pMemoryBarriers,
141 uint32_t bufferMemoryBarrierCount,
142 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
143 uint32_t imageMemoryBarrierCount,
144 const VkImageMemoryBarrier* pImageMemoryBarriers)
145 {
146 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
147 uint32_t b, *dw;
148
149 /* XXX: Right now, we're really dumb and just flush whatever categories
150 * the app asks for. One of these days we may make this a bit better
151 * but right now that's all the hardware allows for in most areas.
152 */
153 VkAccessFlags src_flags = 0;
154 VkAccessFlags dst_flags = 0;
155
156 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
157 src_flags |= pMemoryBarriers[i].srcAccessMask;
158 dst_flags |= pMemoryBarriers[i].dstAccessMask;
159 }
160
161 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
162 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
167 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
169 }
170
171 /* Keep only the source access flags we care about */
172 const uint32_t src_mask =
173 VK_ACCESS_SHADER_WRITE_BIT |
174 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
175 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
176 VK_ACCESS_TRANSFER_WRITE_BIT;
177
178 src_flags = src_flags & src_mask;
179
180 /* Keep only the destination access flags we care about */
181 const uint32_t dst_mask =
182 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
183 VK_ACCESS_INDEX_READ_BIT |
184 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
185 VK_ACCESS_UNIFORM_READ_BIT |
186 VK_ACCESS_SHADER_READ_BIT |
187 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
188 VK_ACCESS_TRANSFER_READ_BIT;
189
190 dst_flags = dst_flags & dst_mask;
191
192 /* The src flags represent how things were used previously. This is
193 * what we use for doing flushes.
194 */
195 struct GENX(PIPE_CONTROL) flush_cmd = {
196 GENX(PIPE_CONTROL_header),
197 .PostSyncOperation = NoWrite,
198 };
199
200 for_each_bit(b, src_flags) {
201 switch ((VkAccessFlagBits)(1 << b)) {
202 case VK_ACCESS_SHADER_WRITE_BIT:
203 flush_cmd.DCFlushEnable = true;
204 break;
205 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
206 flush_cmd.RenderTargetCacheFlushEnable = true;
207 break;
208 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
209 flush_cmd.DepthCacheFlushEnable = true;
210 break;
211 case VK_ACCESS_TRANSFER_WRITE_BIT:
212 flush_cmd.RenderTargetCacheFlushEnable = true;
213 flush_cmd.DepthCacheFlushEnable = true;
214 break;
215 default:
216 unreachable("should've masked this out by now");
217 }
218 }
219
220 /* If we end up doing two PIPE_CONTROLs, the first (flushing) one also has to
221 * stall and wait for the flushing to finish, so we don't re-dirty the
222 * caches with in-flight rendering after the second PIPE_CONTROL
223 * invalidates.
224 */
225
226 if (dst_flags)
227 flush_cmd.CommandStreamerStallEnable = true;
228
229 if (src_flags && dst_flags) {
230 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
231 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
232 }
233
234 /* The dst flags represent how things will be used in the future. This
235 * is what we use for doing cache invalidations.
236 */
237 struct GENX(PIPE_CONTROL) invalidate_cmd = {
238 GENX(PIPE_CONTROL_header),
239 .PostSyncOperation = NoWrite,
240 };
241
242 for_each_bit(b, dst_flags) {
243 switch ((VkAccessFlagBits)(1 << b)) {
244 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
245 case VK_ACCESS_INDEX_READ_BIT:
246 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
247 invalidate_cmd.VFCacheInvalidationEnable = true;
248 break;
249 case VK_ACCESS_UNIFORM_READ_BIT:
250 invalidate_cmd.ConstantCacheInvalidationEnable = true;
251 /* fallthrough */
252 case VK_ACCESS_SHADER_READ_BIT:
253 invalidate_cmd.TextureCacheInvalidationEnable = true;
254 break;
255 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
256 invalidate_cmd.TextureCacheInvalidationEnable = true;
257 break;
258 case VK_ACCESS_TRANSFER_READ_BIT:
259 invalidate_cmd.TextureCacheInvalidationEnable = true;
260 break;
261 default:
262 unreachable("should've masked this out by now");
263 }
264 }
265
266 if (dst_flags) {
267 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
268 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
269 }
270 }
271
272 static uint32_t
273 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
274 {
275 static const uint32_t push_constant_opcodes[] = {
276 [MESA_SHADER_VERTEX] = 21,
277 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
278 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
279 [MESA_SHADER_GEOMETRY] = 22,
280 [MESA_SHADER_FRAGMENT] = 23,
281 [MESA_SHADER_COMPUTE] = 0,
282 };
283
284 VkShaderStageFlags flushed = 0;
285
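/* All of the 3DSTATE_CONSTANT_* packets share the same layout, so we reuse
 * the VS pack template below and simply patch in the per-stage sub-opcode
 * from the table above.
 */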
286 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
287 if (stage == MESA_SHADER_COMPUTE)
288 continue;
289
290 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
291
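/* An offset of zero means no push constant data was allocated for this
 * stage, so emit an empty constant packet that programs no buffers.
 */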
292 if (state.offset == 0) {
293 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
294 ._3DCommandSubOpcode = push_constant_opcodes[stage]);
295 } else {
296 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
297 ._3DCommandSubOpcode = push_constant_opcodes[stage],
298 .ConstantBody = {
299 #if GEN_GEN >= 9
300 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
301 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
302 #else
303 .PointerToConstantBuffer0 = { .offset = state.offset },
304 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
305 #endif
306 });
307 }
308
309 flushed |= mesa_to_vk_shader_stage(stage);
310 }
311
312 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
313
314 return flushed;
315 }
316
317 void
318 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
319 {
320 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
321 uint32_t *p;
322
323 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
324
325 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
326
327 #if GEN_GEN >= 8
328 /* FIXME (jason): Currently, the config_l3 function causes problems on
329 * Haswell and prior if you have a kernel older than 4.4. In order to
330 * work, it requires a couple of registers be white-listed in the
331 * command parser and they weren't added until 4.4. What we should do
332 * is check the command parser version and make it a no-op if your
333 * command parser is either off or too old. Compute won't work 100%,
334 * but at least 3D will. In the meantime, I'm going to make this
335 * gen8+ only so that we can get Haswell working again.
336 */
337 genX(cmd_buffer_config_l3)(cmd_buffer, false);
338 #endif
339
340 genX(flush_pipeline_select_3d)(cmd_buffer);
341
342 if (vb_emit) {
343 const uint32_t num_buffers = __builtin_popcount(vb_emit);
344 const uint32_t num_dwords = 1 + num_buffers * 4;
345
346 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
347 GENX(3DSTATE_VERTEX_BUFFERS));
348 uint32_t vb, i = 0;
349 for_each_bit(vb, vb_emit) {
350 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
351 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
352
353 struct GENX(VERTEX_BUFFER_STATE) state = {
354 .VertexBufferIndex = vb,
355
356 #if GEN_GEN >= 8
357 .MemoryObjectControlState = GENX(MOCS),
358 #else
359 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
360 .InstanceDataStepRate = 1,
361 .VertexBufferMemoryObjectControlState = GENX(MOCS),
362 #endif
363
364 .AddressModifyEnable = true,
365 .BufferPitch = pipeline->binding_stride[vb],
366 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
367
368 #if GEN_GEN >= 8
369 .BufferSize = buffer->size - offset
370 #else
371 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
372 #endif
373 };
374
375 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
376 i++;
377 }
378 }
379
380 cmd_buffer->state.vb_dirty &= ~vb_emit;
381
382 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
383 /* If somebody compiled a pipeline after starting a command buffer, the
384 * scratch bo may have grown since we started this cmd buffer (and
385 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
386 * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
387 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
388 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
389
390 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
391
392 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
393 *
394 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
395 * the next 3DPRIMITIVE command after programming the
396 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
397 *
398 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
399 * pipeline setup, we need to dirty push constants.
400 */
401 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
402 }
403
404 #if GEN_GEN <= 7
405 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
406 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
407 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
408 *
409 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
410 * stall needs to be sent just prior to any 3DSTATE_VS,
411 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
412 * 3DSTATE_BINDING_TABLE_POINTER_VS,
413 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
414 * PIPE_CONTROL needs to be sent before any combination of VS
415 * associated 3DSTATE."
416 */
417 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
418 .DepthStallEnable = true,
419 .PostSyncOperation = WriteImmediateData,
420 .Address = { &cmd_buffer->device->workaround_bo, 0 });
421 }
422 #endif
423
424 /* We emit the binding tables and sampler tables first, then emit push
425 * constants and then finally emit binding table and sampler table
426 * pointers. It has to happen in this order, since emitting the binding
427 * tables may change the push constants (in case of storage images). After
428 * emitting push constants, on SKL+ we have to emit the corresponding
429 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
430 */
431 uint32_t dirty = 0;
432 if (cmd_buffer->state.descriptors_dirty)
433 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
434
435 if (cmd_buffer->state.push_constants_dirty) {
436 #if GEN_GEN >= 9
437 /* On Sky Lake and later, the binding table pointers commands are
438 * what actually flush the changes to push constant state so we need
439 * to dirty them so they get re-emitted below.
440 */
441 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
442 #else
443 cmd_buffer_flush_push_constants(cmd_buffer);
444 #endif
445 }
446
447 if (dirty)
448 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
449
450 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
451 gen8_cmd_buffer_emit_viewport(cmd_buffer);
452
453 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
454 gen7_cmd_buffer_emit_scissor(cmd_buffer);
455
456 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
457 }
458
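/* Emit a tiny (8-byte) vertex buffer at index 32, which is reserved for
 * supplying the base vertex and base instance values to shaders that use
 * them (see the uses_basevertex/uses_baseinstance checks in the callers).
 */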
459 static void
460 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
461 struct anv_bo *bo, uint32_t offset)
462 {
463 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
464 GENX(3DSTATE_VERTEX_BUFFERS));
465
466 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
467 &(struct GENX(VERTEX_BUFFER_STATE)) {
468 .VertexBufferIndex = 32, /* Reserved for this */
469 .AddressModifyEnable = true,
470 .BufferPitch = 0,
471 #if (GEN_GEN >= 8)
472 .MemoryObjectControlState = GENX(MOCS),
473 .BufferStartingAddress = { bo, offset },
474 .BufferSize = 8
475 #else
476 .VertexBufferMemoryObjectControlState = GENX(MOCS),
477 .BufferStartingAddress = { bo, offset },
478 .EndAddress = { bo, offset + 8 },
479 #endif
480 });
481 }
482
483 static void
484 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
485 uint32_t base_vertex, uint32_t base_instance)
486 {
487 struct anv_state id_state =
488 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
489
490 ((uint32_t *)id_state.map)[0] = base_vertex;
491 ((uint32_t *)id_state.map)[1] = base_instance;
492
493 if (!cmd_buffer->device->info.has_llc)
494 anv_state_clflush(id_state);
495
496 emit_base_vertex_instance_bo(cmd_buffer,
497 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
498 }
499
500 void genX(CmdDraw)(
501 VkCommandBuffer commandBuffer,
502 uint32_t vertexCount,
503 uint32_t instanceCount,
504 uint32_t firstVertex,
505 uint32_t firstInstance)
506 {
507 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
508 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
509 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
510
511 genX(cmd_buffer_flush_state)(cmd_buffer);
512
513 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
514 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
515
516 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
517 .VertexAccessType = SEQUENTIAL,
518 .PrimitiveTopologyType = pipeline->topology,
519 .VertexCountPerInstance = vertexCount,
520 .StartVertexLocation = firstVertex,
521 .InstanceCount = instanceCount,
522 .StartInstanceLocation = firstInstance,
523 .BaseVertexLocation = 0);
524 }
525
526 void genX(CmdDrawIndexed)(
527 VkCommandBuffer commandBuffer,
528 uint32_t indexCount,
529 uint32_t instanceCount,
530 uint32_t firstIndex,
531 int32_t vertexOffset,
532 uint32_t firstInstance)
533 {
534 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
535 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
536 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
537
538 genX(cmd_buffer_flush_state)(cmd_buffer);
539
540 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
541 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
542
543 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
544 .VertexAccessType = RANDOM,
545 .PrimitiveTopologyType = pipeline->topology,
546 .VertexCountPerInstance = indexCount,
547 .StartVertexLocation = firstIndex,
548 .InstanceCount = instanceCount,
549 .StartInstanceLocation = firstInstance,
550 .BaseVertexLocation = vertexOffset);
551 }
552
553 /* Auto-Draw / Indirect Registers */
554 #define GEN7_3DPRIM_END_OFFSET 0x2420
555 #define GEN7_3DPRIM_START_VERTEX 0x2430
556 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
557 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
558 #define GEN7_3DPRIM_START_INSTANCE 0x243C
559 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
560
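/* Helpers to load a register from a buffer object (MI_LOAD_REGISTER_MEM)
 * or from an immediate (MI_LOAD_REGISTER_IMM).
 */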
561 static void
562 emit_lrm(struct anv_batch *batch,
563 uint32_t reg, struct anv_bo *bo, uint32_t offset)
564 {
565 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
566 .RegisterAddress = reg,
567 .MemoryAddress = { bo, offset });
568 }
569
570 static void
571 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
572 {
573 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
574 .RegisterOffset = reg,
575 .DataDWord = imm);
576 }
577
578 void genX(CmdDrawIndirect)(
579 VkCommandBuffer commandBuffer,
580 VkBuffer _buffer,
581 VkDeviceSize offset,
582 uint32_t drawCount,
583 uint32_t stride)
584 {
585 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
586 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
587 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
588 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
589 struct anv_bo *bo = buffer->bo;
590 uint32_t bo_offset = buffer->offset + offset;
591
592 genX(cmd_buffer_flush_state)(cmd_buffer);
593
594 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
595 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
596
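/* Load the VkDrawIndirectCommand fields (vertexCount, instanceCount,
 * firstVertex, firstInstance) from the buffer into the 3DPRIM registers
 * consumed by the indirect 3DPRIMITIVE below.
 */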
597 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
598 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
599 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
600 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
601 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
602
603 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
604 .IndirectParameterEnable = true,
605 .VertexAccessType = SEQUENTIAL,
606 .PrimitiveTopologyType = pipeline->topology);
607 }
608
609 void genX(CmdDrawIndexedIndirect)(
610 VkCommandBuffer commandBuffer,
611 VkBuffer _buffer,
612 VkDeviceSize offset,
613 uint32_t drawCount,
614 uint32_t stride)
615 {
616 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
617 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
618 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
619 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
620 struct anv_bo *bo = buffer->bo;
621 uint32_t bo_offset = buffer->offset + offset;
622
623 genX(cmd_buffer_flush_state)(cmd_buffer);
624
625 /* TODO: We need to stomp base vertex to 0 somehow */
626 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
627 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
628
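/* Load the VkDrawIndexedIndirectCommand fields (indexCount, instanceCount,
 * firstIndex, vertexOffset, firstInstance) into the 3DPRIM registers.
 */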
629 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
630 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
631 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
632 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
633 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
634
635 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
636 .IndirectParameterEnable = true,
637 .VertexAccessType = RANDOM,
638 .PrimitiveTopologyType = pipeline->topology);
639 }
640
641
642 void genX(CmdDispatch)(
643 VkCommandBuffer commandBuffer,
644 uint32_t x,
645 uint32_t y,
646 uint32_t z)
647 {
648 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
649 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
650 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
651
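/* If the shader reads gl_NumWorkGroups, stash the dispatch size in dynamic
 * state and remember where it lives so it can be surfaced to the shader.
 */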
652 if (prog_data->uses_num_work_groups) {
653 struct anv_state state =
654 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
655 uint32_t *sizes = state.map;
656 sizes[0] = x;
657 sizes[1] = y;
658 sizes[2] = z;
659 if (!cmd_buffer->device->info.has_llc)
660 anv_state_clflush(state);
661 cmd_buffer->state.num_workgroups_offset = state.offset;
662 cmd_buffer->state.num_workgroups_bo =
663 &cmd_buffer->device->dynamic_state_block_pool.bo;
664 }
665
666 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
667
668 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
669 .SIMDSize = prog_data->simd_size / 16,
670 .ThreadDepthCounterMaximum = 0,
671 .ThreadHeightCounterMaximum = 0,
672 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
673 .ThreadGroupIDXDimension = x,
674 .ThreadGroupIDYDimension = y,
675 .ThreadGroupIDZDimension = z,
676 .RightExecutionMask = pipeline->cs_right_mask,
677 .BottomExecutionMask = 0xffffffff);
678
679 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
680 }
681
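/* MMIO registers consumed by an indirect GPGPU_WALKER, plus the
 * MI_PREDICATE source registers used for the empty-dispatch predicate.
 */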
682 #define GPGPU_DISPATCHDIMX 0x2500
683 #define GPGPU_DISPATCHDIMY 0x2504
684 #define GPGPU_DISPATCHDIMZ 0x2508
685
686 #define MI_PREDICATE_SRC0 0x2400
687 #define MI_PREDICATE_SRC1 0x2408
688
689 void genX(CmdDispatchIndirect)(
690 VkCommandBuffer commandBuffer,
691 VkBuffer _buffer,
692 VkDeviceSize offset)
693 {
694 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
695 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
696 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
697 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
698 struct anv_bo *bo = buffer->bo;
699 uint32_t bo_offset = buffer->offset + offset;
700 struct anv_batch *batch = &cmd_buffer->batch;
701
702 if (prog_data->uses_num_work_groups) {
703 cmd_buffer->state.num_workgroups_offset = bo_offset;
704 cmd_buffer->state.num_workgroups_bo = bo;
705 }
706
707 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
708
709 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
710 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
711 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
712
713 #if GEN_GEN <= 7
714 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
715 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
716 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
717 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
718
719 /* Load compute_dispatch_indirect_x_size into SRC0 */
720 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
721
722 /* predicate = (compute_dispatch_indirect_x_size == 0); */
723 anv_batch_emit(batch, GENX(MI_PREDICATE),
724 .LoadOperation = LOAD_LOAD,
725 .CombineOperation = COMBINE_SET,
726 .CompareOperation = COMPARE_SRCS_EQUAL);
727
728 /* Load compute_dispatch_indirect_y_size into SRC0 */
729 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
730
731 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
732 anv_batch_emit(batch, GENX(MI_PREDICATE),
733 .LoadOperation = LOAD_LOAD,
734 .CombineOperation = COMBINE_OR,
735 .CompareOperation = COMPARE_SRCS_EQUAL);
736
737 /* Load compute_dispatch_indirect_z_size into SRC0 */
738 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
739
740 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
741 anv_batch_emit(batch, GENX(MI_PREDICATE),
742 .LoadOperation = LOAD_LOAD,
743 .CombineOperation = COMBINE_OR,
744 .CompareOperation = COMPARE_SRCS_EQUAL);
745
746 /* predicate = !predicate; */
747 #define COMPARE_FALSE 1
748 anv_batch_emit(batch, GENX(MI_PREDICATE),
749 .LoadOperation = LOAD_LOADINV,
750 .CombineOperation = COMBINE_OR,
751 .CompareOperation = COMPARE_FALSE);
752 #endif
753
754 anv_batch_emit(batch, GENX(GPGPU_WALKER),
755 .IndirectParameterEnable = true,
756 .PredicateEnable = GEN_GEN <= 7,
757 .SIMDSize = prog_data->simd_size / 16,
758 .ThreadDepthCounterMaximum = 0,
759 .ThreadHeightCounterMaximum = 0,
760 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
761 .RightExecutionMask = pipeline->cs_right_mask,
762 .BottomExecutionMask = 0xffffffff);
763
764 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
765 }
766
767 static void
768 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
769 uint32_t pipeline)
770 {
771 #if GEN_GEN >= 8 && GEN_GEN < 10
772 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
773 *
774 * Software must clear the COLOR_CALC_STATE Valid field in
775 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
776 * with Pipeline Select set to GPGPU.
777 *
778 * The internal hardware docs recommend the same workaround for Gen9
779 * hardware too.
780 */
781 if (pipeline == GPGPU)
782 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS));
783 #elif GEN_GEN <= 7
784 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
785 * PIPELINE_SELECT [DevBWR+]":
786 *
787 * Project: DEVSNB+
788 *
789 * Software must ensure all the write caches are flushed through a
790 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
791 * command to invalidate read only caches prior to programming
792 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
793 */
794 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
795 .RenderTargetCacheFlushEnable = true,
796 .DepthCacheFlushEnable = true,
797 .DCFlushEnable = true,
798 .PostSyncOperation = NoWrite,
799 .CommandStreamerStallEnable = true);
800
801 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
802 .TextureCacheInvalidationEnable = true,
803 .ConstantCacheInvalidationEnable = true,
804 .StateCacheInvalidationEnable = true,
805 .InstructionCacheInvalidateEnable = true,
806 .PostSyncOperation = NoWrite);
807 #endif
808 }
809
810 void
811 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
812 {
813 if (cmd_buffer->state.current_pipeline != _3D) {
814 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
815
816 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
817 #if GEN_GEN >= 9
818 .MaskBits = 3,
819 #endif
820 .PipelineSelection = _3D);
821 cmd_buffer->state.current_pipeline = _3D;
822 }
823 }
824
825 void
826 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
827 {
828 if (cmd_buffer->state.current_pipeline != GPGPU) {
829 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
830
831 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
832 #if GEN_GEN >= 9
833 .MaskBits = 3,
834 #endif
835 .PipelineSelection = GPGPU);
836 cmd_buffer->state.current_pipeline = GPGPU;
837 }
838 }
839
840 struct anv_state
841 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
842 struct anv_framebuffer *fb)
843 {
844 struct anv_state state =
845 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
846
847 struct GENX(RENDER_SURFACE_STATE) null_ss = {
848 .SurfaceType = SURFTYPE_NULL,
849 .SurfaceArray = fb->layers > 0,
850 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
851 #if GEN_GEN >= 8
852 .TileMode = YMAJOR,
853 #else
854 .TiledSurface = true,
855 #endif
856 .Width = fb->width - 1,
857 .Height = fb->height - 1,
858 .Depth = fb->layers - 1,
859 .RenderTargetViewExtent = fb->layers - 1,
860 };
861
862 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
863
864 if (!cmd_buffer->device->info.has_llc)
865 anv_state_clflush(state);
866
867 return state;
868 }
869
870 static void
871 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
872 {
873 struct anv_device *device = cmd_buffer->device;
874 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
875 const struct anv_image_view *iview =
876 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
877 const struct anv_image *image = iview ? iview->image : NULL;
878 const struct anv_format *anv_format =
879 iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
880 const bool has_depth = iview && anv_format->has_depth;
881 const bool has_stencil = iview && anv_format->has_stencil;
882
883 /* FIXME: Implement the PMA stall W/A */
884 /* FIXME: Width and Height are wrong */
885
886 /* Emit 3DSTATE_DEPTH_BUFFER */
887 if (has_depth) {
888 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
889 .SurfaceType = SURFTYPE_2D,
890 .DepthWriteEnable = true,
891 .StencilWriteEnable = has_stencil,
892 .HierarchicalDepthBufferEnable = false,
893 .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
894 &image->depth_surface.isl),
895 .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
896 .SurfaceBaseAddress = {
897 .bo = image->bo,
898 .offset = image->offset + image->depth_surface.offset,
899 },
900 .Height = fb->height - 1,
901 .Width = fb->width - 1,
902 .LOD = 0,
903 .Depth = 1 - 1,
904 .MinimumArrayElement = 0,
905 .DepthBufferObjectControlState = GENX(MOCS),
906 #if GEN_GEN >= 8
907 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
908 #endif
909 .RenderTargetViewExtent = 1 - 1);
910 } else {
911 /* Even when no depth buffer is present, the hardware requires that
912 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
913 *
914 * If a null depth buffer is bound, the driver must instead bind depth as:
915 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
916 * 3DSTATE_DEPTH.Width = 1
917 * 3DSTATE_DEPTH.Height = 1
918 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
919 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
920 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
921 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
922 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
923 *
924 * The PRM is wrong, though. The width and height must be programmed to
925 * the actual framebuffer's width and height, even when neither depth buffer
926 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
927 * be combined with a stencil buffer so we use D32_FLOAT instead.
928 */
929 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
930 .SurfaceType = SURFTYPE_2D,
931 .SurfaceFormat = D32_FLOAT,
932 .Width = fb->width - 1,
933 .Height = fb->height - 1,
934 .StencilWriteEnable = has_stencil);
935 }
936
937 /* Emit 3DSTATE_STENCIL_BUFFER */
938 if (has_stencil) {
939 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
940 #if GEN_GEN >= 8 || GEN_IS_HASWELL
941 .StencilBufferEnable = true,
942 #endif
943 .StencilBufferObjectControlState = GENX(MOCS),
944
945 /* Stencil buffers have strange pitch. The PRM says:
946 *
947 * The pitch must be set to 2x the value computed based on width,
948 * as the stencil buffer is stored with two rows interleaved.
949 */
950 .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
951
952 #if GEN_GEN >= 8
953 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
954 #endif
955 .SurfaceBaseAddress = {
956 .bo = image->bo,
957 .offset = image->offset + image->stencil_surface.offset,
958 });
959 } else {
960 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
961 }
962
963 /* Disable hierarchical depth buffers. */
964 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
965
966 /* Clear the clear params. */
967 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
968 }
969
970 /**
971 * @see anv_cmd_buffer_set_subpass()
972 */
973 void
974 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
975 struct anv_subpass *subpass)
976 {
977 cmd_buffer->state.subpass = subpass;
978
979 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
980
981 cmd_buffer_emit_depth_stencil(cmd_buffer);
982 }
983
984 void genX(CmdBeginRenderPass)(
985 VkCommandBuffer commandBuffer,
986 const VkRenderPassBeginInfo* pRenderPassBegin,
987 VkSubpassContents contents)
988 {
989 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
990 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
991 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
992
993 cmd_buffer->state.framebuffer = framebuffer;
994 cmd_buffer->state.pass = pass;
995 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
996
997 genX(flush_pipeline_select_3d)(cmd_buffer);
998
999 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1000
1001 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
1002 .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
1003 .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
1004 .ClippedDrawingRectangleYMax =
1005 render_area->offset.y + render_area->extent.height - 1,
1006 .ClippedDrawingRectangleXMax =
1007 render_area->offset.x + render_area->extent.width - 1,
1008 .DrawingRectangleOriginY = 0,
1009 .DrawingRectangleOriginX = 0);
1010
1011 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1012 anv_cmd_buffer_clear_subpass(cmd_buffer);
1013 }
1014
1015 void genX(CmdNextSubpass)(
1016 VkCommandBuffer commandBuffer,
1017 VkSubpassContents contents)
1018 {
1019 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1020
1021 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1022
1023 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1024 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1025 anv_cmd_buffer_clear_subpass(cmd_buffer);
1026 }
1027
1028 void genX(CmdEndRenderPass)(
1029 VkCommandBuffer commandBuffer)
1030 {
1031 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1032
1033 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1034 }
1035
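/* Occlusion queries are implemented by capturing PS_DEPTH_COUNT at query
 * begin and end; the result is the difference between the two snapshots.
 */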
1036 static void
1037 emit_ps_depth_count(struct anv_batch *batch,
1038 struct anv_bo *bo, uint32_t offset)
1039 {
1040 anv_batch_emit(batch, GENX(PIPE_CONTROL),
1041 .DestinationAddressType = DAT_PPGTT,
1042 .PostSyncOperation = WritePSDepthCount,
1043 .DepthStallEnable = true,
1044 .Address = { bo, offset });
1045 }
1046
1047 static void
1048 emit_query_availability(struct anv_batch *batch,
1049 struct anv_bo *bo, uint32_t offset)
1050 {
1051 anv_batch_emit(batch, GENX(PIPE_CONTROL),
1052 .DestinationAddressType = DAT_PPGTT,
1053 .PostSyncOperation = WriteImmediateData,
1054 .Address = { bo, offset },
1055 .ImmediateData = 1);
1056 }
1057
1058 void genX(CmdBeginQuery)(
1059 VkCommandBuffer commandBuffer,
1060 VkQueryPool queryPool,
1061 uint32_t query,
1062 VkQueryControlFlags flags)
1063 {
1064 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1065 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1066
1067 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1068 * that the pipelining of the depth write breaks. What we see is that
1069 * samples from the render pass clear leak into the first query
1070 * immediately after the clear. Doing a pipecontrol with a post-sync
1071 * operation and DepthStallEnable seems to work around the issue.
1072 */
1073 if (cmd_buffer->state.need_query_wa) {
1074 cmd_buffer->state.need_query_wa = false;
1075 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1076 .DepthCacheFlushEnable = true,
1077 .DepthStallEnable = true);
1078 }
1079
1080 switch (pool->type) {
1081 case VK_QUERY_TYPE_OCCLUSION:
1082 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1083 query * sizeof(struct anv_query_pool_slot));
1084 break;
1085
1086 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1087 default:
1088 unreachable("");
1089 }
1090 }
1091
1092 void genX(CmdEndQuery)(
1093 VkCommandBuffer commandBuffer,
1094 VkQueryPool queryPool,
1095 uint32_t query)
1096 {
1097 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1098 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1099
1100 switch (pool->type) {
1101 case VK_QUERY_TYPE_OCCLUSION:
1102 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1103 query * sizeof(struct anv_query_pool_slot) + 8);
1104
1105 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1106 query * sizeof(struct anv_query_pool_slot) + 16);
1107 break;
1108
1109 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1110 default:
1111 unreachable("");
1112 }
1113 }
1114
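/* Render command streamer timestamp register (64 bits, read as two dwords). */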
1115 #define TIMESTAMP 0x2358
1116
1117 void genX(CmdWriteTimestamp)(
1118 VkCommandBuffer commandBuffer,
1119 VkPipelineStageFlagBits pipelineStage,
1120 VkQueryPool queryPool,
1121 uint32_t query)
1122 {
1123 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1124 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1125 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1126
1127 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1128
1129 switch (pipelineStage) {
1130 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1131 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1132 .RegisterAddress = TIMESTAMP,
1133 .MemoryAddress = { &pool->bo, offset });
1134 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1135 .RegisterAddress = TIMESTAMP + 4,
1136 .MemoryAddress = { &pool->bo, offset + 4 });
1137 break;
1138
1139 default:
1140 /* Everything else is bottom-of-pipe */
1141 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1142 .DestinationAddressType = DAT_PPGTT,
1143 .PostSyncOperation = WriteTimestamp,
1144 .Address = { &pool->bo, offset });
1145 break;
1146 }
1147
1148 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1149 }
1150
1151 #if GEN_GEN > 7 || GEN_IS_HASWELL
1152
1153 #define alu_opcode(v) __gen_uint((v), 20, 31)
1154 #define alu_operand1(v) __gen_uint((v), 10, 19)
1155 #define alu_operand2(v) __gen_uint((v), 0, 9)
1156 #define alu(opcode, operand1, operand2) \
1157 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1158
1159 #define OPCODE_NOOP 0x000
1160 #define OPCODE_LOAD 0x080
1161 #define OPCODE_LOADINV 0x480
1162 #define OPCODE_LOAD0 0x081
1163 #define OPCODE_LOAD1 0x481
1164 #define OPCODE_ADD 0x100
1165 #define OPCODE_SUB 0x101
1166 #define OPCODE_AND 0x102
1167 #define OPCODE_OR 0x103
1168 #define OPCODE_XOR 0x104
1169 #define OPCODE_STORE 0x180
1170 #define OPCODE_STOREINV 0x580
1171
1172 #define OPERAND_R0 0x00
1173 #define OPERAND_R1 0x01
1174 #define OPERAND_R2 0x02
1175 #define OPERAND_R3 0x03
1176 #define OPERAND_R4 0x04
1177 #define OPERAND_SRCA 0x20
1178 #define OPERAND_SRCB 0x21
1179 #define OPERAND_ACCU 0x31
1180 #define OPERAND_ZF 0x32
1181 #define OPERAND_CF 0x33
1182
1183 #define CS_GPR(n) (0x2600 + (n) * 8)
1184
1185 static void
1186 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1187 struct anv_bo *bo, uint32_t offset)
1188 {
1189 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1190 .RegisterAddress = reg,
1191 .MemoryAddress = { bo, offset });
1192 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1193 .RegisterAddress = reg + 4,
1194 .MemoryAddress = { bo, offset + 4 });
1195 }
1196
1197 static void
1198 store_query_result(struct anv_batch *batch, uint32_t reg,
1199 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1200 {
1201 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1202 .RegisterAddress = reg,
1203 .MemoryAddress = { bo, offset });
1204
1205 if (flags & VK_QUERY_RESULT_64_BIT)
1206 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1207 .RegisterAddress = reg + 4,
1208 .MemoryAddress = { bo, offset + 4 });
1209 }
1210
1211 void genX(CmdCopyQueryPoolResults)(
1212 VkCommandBuffer commandBuffer,
1213 VkQueryPool queryPool,
1214 uint32_t firstQuery,
1215 uint32_t queryCount,
1216 VkBuffer destBuffer,
1217 VkDeviceSize destOffset,
1218 VkDeviceSize destStride,
1219 VkQueryResultFlags flags)
1220 {
1221 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1222 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1223 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1224 uint32_t slot_offset, dst_offset;
1225
1226 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1227 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1228 .CommandStreamerStallEnable = true,
1229 .StallAtPixelScoreboard = true);
1230
1231 dst_offset = buffer->offset + destOffset;
1232 for (uint32_t i = 0; i < queryCount; i++) {
1233
1234 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1235 switch (pool->type) {
1236 case VK_QUERY_TYPE_OCCLUSION:
1237 emit_load_alu_reg_u64(&cmd_buffer->batch,
1238 CS_GPR(0), &pool->bo, slot_offset);
1239 emit_load_alu_reg_u64(&cmd_buffer->batch,
1240 CS_GPR(1), &pool->bo, slot_offset + 8);
1241
1242 /* FIXME: We need to clamp the result for 32 bit. */
1243
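/* MI_MATH: R2 = R1 (end count) - R0 (begin count) */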
1244 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1245 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1246 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1247 dw[3] = alu(OPCODE_SUB, 0, 0);
1248 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1249 break;
1250
1251 case VK_QUERY_TYPE_TIMESTAMP:
1252 emit_load_alu_reg_u64(&cmd_buffer->batch,
1253 CS_GPR(2), &pool->bo, slot_offset);
1254 break;
1255
1256 default:
1257 unreachable("unhandled query type");
1258 }
1259
1260 store_query_result(&cmd_buffer->batch,
1261 CS_GPR(2), buffer->bo, dst_offset, flags);
1262
1263 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1264 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1265 &pool->bo, slot_offset + 16);
1266 if (flags & VK_QUERY_RESULT_64_BIT)
1267 store_query_result(&cmd_buffer->batch,
1268 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1269 else
1270 store_query_result(&cmd_buffer->batch,
1271 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1272 }
1273
1274 dst_offset += destStride;
1275 }
1276 }
1277
1278 #endif