Merge remote-tracking branch 'public/master' into vulkan
[mesa.git] / src/intel/vulkan/genX_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
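/* Re-program STATE_BASE_ADDRESS so that general, surface, dynamic and
 * instruction state are sourced from the driver's block pools, with the
 * cache flush/invalidate workarounds described below bracketing the packet.
 */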
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
53 .RenderTargetCacheFlushEnable = true);
54 #endif
55
56 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
57 .GeneralStateBaseAddress = { scratch_bo, 0 },
58 .GeneralStateMemoryObjectControlState = GENX(MOCS),
59 .GeneralStateBaseAddressModifyEnable = true,
60
61 .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
62 .SurfaceStateMemoryObjectControlState = GENX(MOCS),
63 .SurfaceStateBaseAddressModifyEnable = true,
64
65 .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
66 .DynamicStateMemoryObjectControlState = GENX(MOCS),
67 .DynamicStateBaseAddressModifyEnable = true,
68
69 .IndirectObjectBaseAddress = { NULL, 0 },
70 .IndirectObjectMemoryObjectControlState = GENX(MOCS),
71 .IndirectObjectBaseAddressModifyEnable = true,
72
73 .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
74 .InstructionMemoryObjectControlState = GENX(MOCS),
75 .InstructionBaseAddressModifyEnable = true,
76
77 # if (GEN_GEN >= 8)
78 /* Broadwell requires that we specify a buffer size for a bunch of
79 * these fields. However, since we will be growing the BOs live, we
80 * just set them all to the maximum.
81 */
82 .GeneralStateBufferSize = 0xfffff,
83 .GeneralStateBufferSizeModifyEnable = true,
84 .DynamicStateBufferSize = 0xfffff,
85 .DynamicStateBufferSizeModifyEnable = true,
86 .IndirectObjectBufferSize = 0xfffff,
87 .IndirectObjectBufferSizeModifyEnable = true,
88 .InstructionBufferSize = 0xfffff,
89 .InstructionBuffersizeModifyEnable = true,
90 # endif
91 );
92
93 /* After re-setting the surface state base address, we have to do some
94 * cache flushing so that the sampler engine will pick up the new
95 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
96 * Shared Function > 3D Sampler > State > State Caching (page 96):
97 *
98 * Coherency with system memory in the state cache, like the texture
99 * cache is handled partially by software. It is expected that the
100 * command stream or shader will issue Cache Flush operation or
101 * Cache_Flush sampler message to ensure that the L1 cache remains
102 * coherent with system memory.
103 *
104 * [...]
105 *
106 * Whenever the value of the Dynamic_State_Base_Addr,
107 * Surface_State_Base_Addr are altered, the L1 state cache must be
108 * invalidated to ensure the new surface or sampler state is fetched
109 * from system memory.
110 *
111 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
112 * which, according to the PIPE_CONTROL instruction documentation in the
113 * Broadwell PRM:
114 *
115 * Setting this bit is independent of any other bit in this packet.
116 * This bit controls the invalidation of the L1 and L2 state caches
117 * at the top of the pipe i.e. at the parsing time.
118 *
119 * Unfortunately, experimentation seems to indicate that state cache
120 * invalidation through a PIPE_CONTROL does nothing whatsoever in
121 * regards to surface state and binding tables. Instead, it seems that
122 * invalidating the texture cache is what is actually needed.
123 *
124 * XXX: As far as we have been able to determine through
125 * experimentation, flushing the texture cache appears to be
126 * sufficient. The theory here is that all of the sampling/rendering
127 * units cache the binding table in the texture cache. However, we have
128 * yet to be able to actually confirm this.
129 */
130 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
131 .TextureCacheInvalidationEnable = true);
132 }
133
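/* vkCmdPipelineBarrier: accumulate the source and destination access masks
 * from all of the barrier structs, then turn source flags into cache
 * flushes and destination flags into cache invalidations, emitted as up to
 * two PIPE_CONTROLs.
 */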
134 void genX(CmdPipelineBarrier)(
135 VkCommandBuffer commandBuffer,
136 VkPipelineStageFlags srcStageMask,
137 VkPipelineStageFlags destStageMask,
138 VkBool32 byRegion,
139 uint32_t memoryBarrierCount,
140 const VkMemoryBarrier* pMemoryBarriers,
141 uint32_t bufferMemoryBarrierCount,
142 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
143 uint32_t imageMemoryBarrierCount,
144 const VkImageMemoryBarrier* pImageMemoryBarriers)
145 {
146 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
147 uint32_t b, *dw;
148
149 /* XXX: Right now, we're really dumb and just flush whatever categories
150 * the app asks for. One of these days we may make this a bit better
151 * but right now that's all the hardware allows for in most areas.
152 */
153 VkAccessFlags src_flags = 0;
154 VkAccessFlags dst_flags = 0;
155
156 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
157 src_flags |= pMemoryBarriers[i].srcAccessMask;
158 dst_flags |= pMemoryBarriers[i].dstAccessMask;
159 }
160
161 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
162 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
167 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
169 }
170
171 /* Mask out the Source access flags we care about */
172 const uint32_t src_mask =
173 VK_ACCESS_SHADER_WRITE_BIT |
174 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
175 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
176 VK_ACCESS_TRANSFER_WRITE_BIT;
177
178 src_flags = src_flags & src_mask;
179
180 /* Mask out the destination access flags we care about */
181 const uint32_t dst_mask =
182 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
183 VK_ACCESS_INDEX_READ_BIT |
184 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
185 VK_ACCESS_UNIFORM_READ_BIT |
186 VK_ACCESS_SHADER_READ_BIT |
187 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
188 VK_ACCESS_TRANSFER_READ_BIT;
189
190 dst_flags = dst_flags & dst_mask;
191
192 /* The src flags represent how things were used previously. This is
193 * what we use for doing flushes.
194 */
195 struct GENX(PIPE_CONTROL) flush_cmd = {
196 GENX(PIPE_CONTROL_header),
197 .PostSyncOperation = NoWrite,
198 };
199
200 for_each_bit(b, src_flags) {
201 switch ((VkAccessFlagBits)(1 << b)) {
202 case VK_ACCESS_SHADER_WRITE_BIT:
203 flush_cmd.DCFlushEnable = true;
204 break;
205 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
206 flush_cmd.RenderTargetCacheFlushEnable = true;
207 break;
208 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
209 flush_cmd.DepthCacheFlushEnable = true;
210 break;
211 case VK_ACCESS_TRANSFER_WRITE_BIT:
212 flush_cmd.RenderTargetCacheFlushEnable = true;
213 flush_cmd.DepthCacheFlushEnable = true;
214 break;
215 default:
216 unreachable("should've masked this out by now");
217 }
218 }
219
220 * If we end up doing two PIPE_CONTROLs, the first, flushing one also has to
221 * stall and wait for the flushing to finish, so we don't re-dirty the
222 * caches with in-flight rendering after the second PIPE_CONTROL
223 * invalidates.
224 */
225
226 if (dst_flags)
227 flush_cmd.CommandStreamerStallEnable = true;
228
229 if (src_flags && dst_flags) {
230 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
231 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
232 }
233
234 /* The dst flags represent how things will be used in the future. This
235 * is what we use for doing cache invalidations.
236 */
237 struct GENX(PIPE_CONTROL) invalidate_cmd = {
238 GENX(PIPE_CONTROL_header),
239 .PostSyncOperation = NoWrite,
240 };
241
242 for_each_bit(b, dst_flags) {
243 switch ((VkAccessFlagBits)(1 << b)) {
244 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
245 case VK_ACCESS_INDEX_READ_BIT:
246 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
247 invalidate_cmd.VFCacheInvalidationEnable = true;
248 break;
249 case VK_ACCESS_UNIFORM_READ_BIT:
250 invalidate_cmd.ConstantCacheInvalidationEnable = true;
251 /* fallthrough */
252 case VK_ACCESS_SHADER_READ_BIT:
253 invalidate_cmd.TextureCacheInvalidationEnable = true;
254 break;
255 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
256 invalidate_cmd.TextureCacheInvalidationEnable = true;
257 break;
258 case VK_ACCESS_TRANSFER_READ_BIT:
259 invalidate_cmd.TextureCacheInvalidationEnable = true;
260 break;
261 default:
262 unreachable("should've masked this out by now");
263 }
264 }
265
266 if (dst_flags) {
267 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
268 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
269 }
270 }
271
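/* Emit 3DSTATE_CONSTANT_* for every graphics stage with dirty push
 * constants and return the set of stages flushed. The packets share one
 * layout, so the VS variant is used as a template and only the 3D command
 * sub-opcode differs per stage (see push_constant_opcodes below).
 */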
272 static uint32_t
273 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
274 {
275 static const uint32_t push_constant_opcodes[] = {
276 [MESA_SHADER_VERTEX] = 21,
277 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
278 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
279 [MESA_SHADER_GEOMETRY] = 22,
280 [MESA_SHADER_FRAGMENT] = 23,
281 [MESA_SHADER_COMPUTE] = 0,
282 };
283
284 VkShaderStageFlags flushed = 0;
285
286 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
287 if (stage == MESA_SHADER_COMPUTE)
288 continue;
289
290 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
291
292 if (state.offset == 0) {
293 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
294 ._3DCommandSubOpcode = push_constant_opcodes[stage]);
295 } else {
296 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
297 ._3DCommandSubOpcode = push_constant_opcodes[stage],
298 .ConstantBody = {
299 #if GEN_GEN >= 9
300 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
301 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
302 #else
303 .PointerToConstantBuffer0 = { .offset = state.offset },
304 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
305 #endif
306 });
307 }
308
309 flushed |= mesa_to_vk_shader_stage(stage);
310 }
311
312 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
313
314 return flushed;
315 }
316
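/* Flush all dirty 3D state (vertex buffers, pipeline, descriptors, push
 * constants, viewport/scissor and the remaining dynamic state) into the
 * batch prior to emitting a 3DPRIMITIVE.
 */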
317 void
318 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
319 {
320 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
321 uint32_t *p;
322
323 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
324
325 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
326
327 #if GEN_GEN >= 8
328 /* FIXME (jason): Currently, the config_l3 function causes problems on
329 * Haswell and prior if you have a kernel older than 4.4. In order to
330 * work, it requires a couple of registers be white-listed in the
331 * command parser and they weren't added until 4.4. What we should do
332 * is check the command parser version and make it a no-op if your
333 * command parser is either off or too old. Compute won't work 100%,
334 * but at least 3-D will. In the mean time, I'm going to make this
335 * gen8+ only so that we can get Haswell working again.
336 */
337 genX(cmd_buffer_config_l3)(cmd_buffer, false);
338 #endif
339
340 genX(flush_pipeline_select_3d)(cmd_buffer);
341
342 if (vb_emit) {
343 const uint32_t num_buffers = __builtin_popcount(vb_emit);
344 const uint32_t num_dwords = 1 + num_buffers * 4;
345
346 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
347 GENX(3DSTATE_VERTEX_BUFFERS));
348 uint32_t vb, i = 0;
349 for_each_bit(vb, vb_emit) {
350 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
351 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
352
353 struct GENX(VERTEX_BUFFER_STATE) state = {
354 .VertexBufferIndex = vb,
355
356 #if GEN_GEN >= 8
357 .MemoryObjectControlState = GENX(MOCS),
358 #else
359 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
360 .InstanceDataStepRate = 1,
361 .VertexBufferMemoryObjectControlState = GENX(MOCS),
362 #endif
363
364 .AddressModifyEnable = true,
365 .BufferPitch = pipeline->binding_stride[vb],
366 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
367
368 #if GEN_GEN >= 8
369 .BufferSize = buffer->size - offset
370 #else
371 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
372 #endif
373 };
374
375 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
376 i++;
377 }
378 }
379
380 cmd_buffer->state.vb_dirty &= ~vb_emit;
381
382 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
383 /* If somebody compiled a pipeline after starting a command buffer, the
384 * scratch bo may have grown since we started this cmd buffer (and
385 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
386 * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
387 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
388 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
389
390 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
391
392 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
393 *
394 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
395 * the next 3DPRIMITIVE command after programming the
396 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
397 *
398 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
399 * pipeline setup, we need to dirty push constants.
400 */
401 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
402 }
403
404 #if GEN_GEN <= 7
405 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
406 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
407 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
408 *
409 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
410 * stall needs to be sent just prior to any 3DSTATE_VS,
411 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
412 * 3DSTATE_BINDING_TABLE_POINTER_VS,
413 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
414 * PIPE_CONTROL needs to be sent before any combination of VS
415 * associated 3DSTATE."
416 */
417 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
418 .DepthStallEnable = true,
419 .PostSyncOperation = WriteImmediateData,
420 .Address = { &cmd_buffer->device->workaround_bo, 0 });
421 }
422 #endif
423
424 /* We emit the binding tables and sampler tables first, then emit push
425 * constants and then finally emit binding table and sampler table
426 * pointers. It has to happen in this order, since emitting the binding
427 * tables may change the push constants (in case of storage images). After
428 * emitting push constants, on SKL+ we have to emit the corresponding
429 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
430 */
431 uint32_t dirty = 0;
432 if (cmd_buffer->state.descriptors_dirty)
433 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
434
435 if (cmd_buffer->state.push_constants_dirty) {
436 #if GEN_GEN >= 9
437 /* On Skylake and later, the binding table pointer commands are
438 * what actually flush the changes to push constant state, so we need
439 * to dirty them here so they get re-emitted below.
440 */
441 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
442 #else
443 cmd_buffer_flush_push_constants(cmd_buffer);
444 #endif
445 }
446
447 if (dirty)
448 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
449
450 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
451 gen8_cmd_buffer_emit_viewport(cmd_buffer);
452
453 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
454 gen7_cmd_buffer_emit_scissor(cmd_buffer);
455
456 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
457 }
458
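/* Point the vertex buffer reserved at index 32 at an 8-byte buffer holding
 * the base vertex and base instance values, from which the vertex shader's
 * base vertex/instance system values are fetched.
 */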
459 static void
460 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
461 struct anv_bo *bo, uint32_t offset)
462 {
463 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
464 GENX(3DSTATE_VERTEX_BUFFERS));
465
466 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
467 &(struct GENX(VERTEX_BUFFER_STATE)) {
468 .VertexBufferIndex = 32, /* Reserved for this */
469 .AddressModifyEnable = true,
470 .BufferPitch = 0,
471 #if (GEN_GEN >= 8)
472 .MemoryObjectControlState = GENX(MOCS),
473 .BufferStartingAddress = { bo, offset },
474 .BufferSize = 8
475 #else
476 .VertexBufferMemoryObjectControlState = GENX(MOCS),
477 .BufferStartingAddress = { bo, offset },
478 .EndAddress = { bo, offset + 8 },
479 #endif
480 });
481 }
482
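/* Upload base vertex and base instance into 8 bytes of dynamic state
 * (clflushed on non-LLC platforms) and bind it through the helper above.
 */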
483 static void
484 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
485 uint32_t base_vertex, uint32_t base_instance)
486 {
487 struct anv_state id_state =
488 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
489
490 ((uint32_t *)id_state.map)[0] = base_vertex;
491 ((uint32_t *)id_state.map)[1] = base_instance;
492
493 if (!cmd_buffer->device->info.has_llc)
494 anv_state_clflush(id_state);
495
496 emit_base_vertex_instance_bo(cmd_buffer,
497 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
498 }
499
500 void genX(CmdDraw)(
501 VkCommandBuffer commandBuffer,
502 uint32_t vertexCount,
503 uint32_t instanceCount,
504 uint32_t firstVertex,
505 uint32_t firstInstance)
506 {
507 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
508 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
509 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
510
511 genX(cmd_buffer_flush_state)(cmd_buffer);
512
513 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
514 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
515
516 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
517 .VertexAccessType = SEQUENTIAL,
518 .PrimitiveTopologyType = pipeline->topology,
519 .VertexCountPerInstance = vertexCount,
520 .StartVertexLocation = firstVertex,
521 .InstanceCount = instanceCount,
522 .StartInstanceLocation = firstInstance,
523 .BaseVertexLocation = 0);
524 }
525
526 void genX(CmdDrawIndexed)(
527 VkCommandBuffer commandBuffer,
528 uint32_t indexCount,
529 uint32_t instanceCount,
530 uint32_t firstIndex,
531 int32_t vertexOffset,
532 uint32_t firstInstance)
533 {
534 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
535 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
536 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
537
538 genX(cmd_buffer_flush_state)(cmd_buffer);
539
540 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
541 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
542
543 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
544 .VertexAccessType = RANDOM,
545 .PrimitiveTopologyType = pipeline->topology,
546 .VertexCountPerInstance = indexCount,
547 .StartVertexLocation = firstIndex,
548 .InstanceCount = instanceCount,
549 .StartInstanceLocation = firstInstance,
550 .BaseVertexLocation = vertexOffset);
551 }
552
553 /* Auto-Draw / Indirect Registers */
554 #define GEN7_3DPRIM_END_OFFSET 0x2420
555 #define GEN7_3DPRIM_START_VERTEX 0x2430
556 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
557 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
558 #define GEN7_3DPRIM_START_INSTANCE 0x243C
559 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
560
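/* Load a 32-bit MMIO register from a buffer object via MI_LOAD_REGISTER_MEM. */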
561 static void
562 emit_lrm(struct anv_batch *batch,
563 uint32_t reg, struct anv_bo *bo, uint32_t offset)
564 {
565 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
566 .RegisterAddress = reg,
567 .MemoryAddress = { bo, offset });
568 }
569
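/* Load a 32-bit MMIO register with an immediate via MI_LOAD_REGISTER_IMM. */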
570 static void
571 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
572 {
573 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
574 .RegisterOffset = reg,
575 .DataDWord = imm);
576 }
577
578 void genX(CmdDrawIndirect)(
579 VkCommandBuffer commandBuffer,
580 VkBuffer _buffer,
581 VkDeviceSize offset,
582 uint32_t drawCount,
583 uint32_t stride)
584 {
585 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
586 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
587 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
588 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
589 struct anv_bo *bo = buffer->bo;
590 uint32_t bo_offset = buffer->offset + offset;
591
592 genX(cmd_buffer_flush_state)(cmd_buffer);
593
594 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
595 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
596
597 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
598 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
599 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
600 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
601 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
602
603 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
604 .IndirectParameterEnable = true,
605 .VertexAccessType = SEQUENTIAL,
606 .PrimitiveTopologyType = pipeline->topology);
607 }
608
609 void genX(CmdDrawIndexedIndirect)(
610 VkCommandBuffer commandBuffer,
611 VkBuffer _buffer,
612 VkDeviceSize offset,
613 uint32_t drawCount,
614 uint32_t stride)
615 {
616 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
617 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
618 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
619 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
620 struct anv_bo *bo = buffer->bo;
621 uint32_t bo_offset = buffer->offset + offset;
622
623 genX(cmd_buffer_flush_state)(cmd_buffer);
624
625 /* TODO: We need to stomp base vertex to 0 somehow */
626 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
627 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
628
629 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
630 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
631 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
632 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
633 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
634
635 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
636 .IndirectParameterEnable = true,
637 .VertexAccessType = RANDOM,
638 .PrimitiveTopologyType = pipeline->topology);
639 }
640
641 #if GEN_GEN == 7
642
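/* Check that the kernel's command parser is at least the required version;
 * otherwise report VK_ERROR_FEATURE_NOT_PRESENT for the named entrypoint
 * and return false so the caller can bail out.
 */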
643 static bool
644 verify_cmd_parser(const struct anv_device *device,
645 int required_version,
646 const char *function)
647 {
648 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
649 vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
650 "cmd parser version %d is required for %s",
651 required_version, function);
652 return false;
653 } else {
654 return true;
655 }
656 }
657
658 #endif
659
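/* vkCmdDispatch: if the compute shader reads the number of workgroups,
 * stash the x/y/z counts in dynamic state and record their location in
 * num_workgroups_bo/offset, then flush compute state and emit GPGPU_WALKER.
 */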
660 void genX(CmdDispatch)(
661 VkCommandBuffer commandBuffer,
662 uint32_t x,
663 uint32_t y,
664 uint32_t z)
665 {
666 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
667 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
668 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
669
670 if (prog_data->uses_num_work_groups) {
671 struct anv_state state =
672 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
673 uint32_t *sizes = state.map;
674 sizes[0] = x;
675 sizes[1] = y;
676 sizes[2] = z;
677 if (!cmd_buffer->device->info.has_llc)
678 anv_state_clflush(state);
679 cmd_buffer->state.num_workgroups_offset = state.offset;
680 cmd_buffer->state.num_workgroups_bo =
681 &cmd_buffer->device->dynamic_state_block_pool.bo;
682 }
683
684 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
685
686 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
687 .SIMDSize = prog_data->simd_size / 16,
688 .ThreadDepthCounterMaximum = 0,
689 .ThreadHeightCounterMaximum = 0,
690 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
691 .ThreadGroupIDXDimension = x,
692 .ThreadGroupIDYDimension = y,
693 .ThreadGroupIDZDimension = z,
694 .RightExecutionMask = pipeline->cs_right_mask,
695 .BottomExecutionMask = 0xffffffff);
696
697 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
698 }
699
700 #define GPGPU_DISPATCHDIMX 0x2500
701 #define GPGPU_DISPATCHDIMY 0x2504
702 #define GPGPU_DISPATCHDIMZ 0x2508
703
704 #define MI_PREDICATE_SRC0 0x2400
705 #define MI_PREDICATE_SRC1 0x2408
706
707 void genX(CmdDispatchIndirect)(
708 VkCommandBuffer commandBuffer,
709 VkBuffer _buffer,
710 VkDeviceSize offset)
711 {
712 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
713 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
714 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
715 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
716 struct anv_bo *bo = buffer->bo;
717 uint32_t bo_offset = buffer->offset + offset;
718 struct anv_batch *batch = &cmd_buffer->batch;
719
720 #if GEN_GEN == 7
721 /* Linux 4.4 added command parser version 5 which allows the GPGPU
722 * indirect dispatch registers to be written.
723 */
724 if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
725 return;
726 #endif
727
728 if (prog_data->uses_num_work_groups) {
729 cmd_buffer->state.num_workgroups_offset = bo_offset;
730 cmd_buffer->state.num_workgroups_bo = bo;
731 }
732
733 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
734
735 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
736 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
737 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
738
739 #if GEN_GEN <= 7
740 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
741 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
742 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
743 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
744
745 /* Load compute_dispatch_indirect_x_size into SRC0 */
746 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
747
748 /* predicate = (compute_dispatch_indirect_x_size == 0); */
749 anv_batch_emit(batch, GENX(MI_PREDICATE),
750 .LoadOperation = LOAD_LOAD,
751 .CombineOperation = COMBINE_SET,
752 .CompareOperation = COMPARE_SRCS_EQUAL);
753
754 /* Load compute_dispatch_indirect_y_size into SRC0 */
755 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
756
757 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
758 anv_batch_emit(batch, GENX(MI_PREDICATE),
759 .LoadOperation = LOAD_LOAD,
760 .CombineOperation = COMBINE_OR,
761 .CompareOperation = COMPARE_SRCS_EQUAL);
762
763 /* Load compute_dispatch_indirect_z_size into SRC0 */
764 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
765
766 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
767 anv_batch_emit(batch, GENX(MI_PREDICATE),
768 .LoadOperation = LOAD_LOAD,
769 .CombineOperation = COMBINE_OR,
770 .CompareOperation = COMPARE_SRCS_EQUAL);
771
772 /* predicate = !predicate; */
773 #define COMPARE_FALSE 1
774 anv_batch_emit(batch, GENX(MI_PREDICATE),
775 .LoadOperation = LOAD_LOADINV,
776 .CombineOperation = COMBINE_OR,
777 .CompareOperation = COMPARE_FALSE);
778 #endif
779
780 anv_batch_emit(batch, GENX(GPGPU_WALKER),
781 .IndirectParameterEnable = true,
782 .PredicateEnable = GEN_GEN <= 7,
783 .SIMDSize = prog_data->simd_size / 16,
784 .ThreadDepthCounterMaximum = 0,
785 .ThreadHeightCounterMaximum = 0,
786 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
787 .RightExecutionMask = pipeline->cs_right_mask,
788 .BottomExecutionMask = 0xffffffff);
789
790 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
791 }
792
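/* Emit the workarounds required before a PIPELINE_SELECT that switches
 * between the 3D and GPGPU pipelines (see the PRM citations below).
 */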
793 static void
794 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
795 uint32_t pipeline)
796 {
797 #if GEN_GEN >= 8 && GEN_GEN < 10
798 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
799 *
800 * Software must clear the COLOR_CALC_STATE Valid field in
801 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
802 * with Pipeline Select set to GPGPU.
803 *
804 * The internal hardware docs recommend the same workaround for Gen9
805 * hardware too.
806 */
807 if (pipeline == GPGPU)
808 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS));
809 #elif GEN_GEN <= 7
810 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
811 * PIPELINE_SELECT [DevBWR+]":
812 *
813 * Project: DEVSNB+
814 *
815 * Software must ensure all the write caches are flushed through a
816 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
817 * command to invalidate read only caches prior to programming
818 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
819 */
820 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
821 .RenderTargetCacheFlushEnable = true,
822 .DepthCacheFlushEnable = true,
823 .DCFlushEnable = true,
824 .PostSyncOperation = NoWrite,
825 .CommandStreamerStallEnable = true);
826
827 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
828 .TextureCacheInvalidationEnable = true,
829 .ConstantCacheInvalidationEnable = true,
830 .StateCacheInvalidationEnable = true,
831 .InstructionCacheInvalidateEnable = true,
832 .PostSyncOperation = NoWrite);
833 #endif
834 }
835
836 void
837 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
838 {
839 if (cmd_buffer->state.current_pipeline != _3D) {
840 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
841
842 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
843 #if GEN_GEN >= 9
844 .MaskBits = 3,
845 #endif
846 .PipelineSelection = _3D);
847 cmd_buffer->state.current_pipeline = _3D;
848 }
849 }
850
851 void
852 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
853 {
854 if (cmd_buffer->state.current_pipeline != GPGPU) {
855 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
856
857 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
858 #if GEN_GEN >= 9
859 .MaskBits = 3,
860 #endif
861 .PipelineSelection = GPGPU);
862 cmd_buffer->state.current_pipeline = GPGPU;
863 }
864 }
865
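/* Allocate and fill a null RENDER_SURFACE_STATE sized to the framebuffer,
 * used where a binding table slot needs a surface but no real attachment is
 * bound.
 */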
866 struct anv_state
867 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
868 struct anv_framebuffer *fb)
869 {
870 struct anv_state state =
871 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
872
873 struct GENX(RENDER_SURFACE_STATE) null_ss = {
874 .SurfaceType = SURFTYPE_NULL,
875 .SurfaceArray = fb->layers > 0,
876 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
877 #if GEN_GEN >= 8
878 .TileMode = YMAJOR,
879 #else
880 .TiledSurface = true,
881 #endif
882 .Width = fb->width - 1,
883 .Height = fb->height - 1,
884 .Depth = fb->layers - 1,
885 .RenderTargetViewExtent = fb->layers - 1,
886 };
887
888 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
889
890 if (!cmd_buffer->device->info.has_llc)
891 anv_state_clflush(state);
892
893 return state;
894 }
895
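/* Emit 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS for the current
 * subpass's depth/stencil attachment, or the required null setup when none
 * is present.
 */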
896 static void
897 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
898 {
899 struct anv_device *device = cmd_buffer->device;
900 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
901 const struct anv_image_view *iview =
902 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
903 const struct anv_image *image = iview ? iview->image : NULL;
904 const struct anv_format *anv_format =
905 iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
906 const bool has_depth = iview && anv_format->has_depth;
907 const bool has_stencil = iview && anv_format->has_stencil;
908
909 /* FIXME: Implement the PMA stall W/A */
910 /* FIXME: Width and Height are wrong */
911
912 /* Emit 3DSTATE_DEPTH_BUFFER */
913 if (has_depth) {
914 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
915 .SurfaceType = SURFTYPE_2D,
916 .DepthWriteEnable = true,
917 .StencilWriteEnable = has_stencil,
918 .HierarchicalDepthBufferEnable = false,
919 .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
920 &image->depth_surface.isl),
921 .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
922 .SurfaceBaseAddress = {
923 .bo = image->bo,
924 .offset = image->offset + image->depth_surface.offset,
925 },
926 .Height = fb->height - 1,
927 .Width = fb->width - 1,
928 .LOD = 0,
929 .Depth = 1 - 1,
930 .MinimumArrayElement = 0,
931 .DepthBufferObjectControlState = GENX(MOCS),
932 #if GEN_GEN >= 8
933 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
934 #endif
935 .RenderTargetViewExtent = 1 - 1);
936 } else {
937 /* Even when no depth buffer is present, the hardware requires that
938 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
939 *
940 * If a null depth buffer is bound, the driver must instead bind depth as:
941 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
942 * 3DSTATE_DEPTH.Width = 1
943 * 3DSTATE_DEPTH.Height = 1
944 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
945 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
946 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
947 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
948 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
949 *
950 * The PRM is wrong, though. The width and height must be programmed to
951 * the actual framebuffer's width and height, even when neither depth buffer
952 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
953 * be combined with a stencil buffer so we use D32_FLOAT instead.
954 */
955 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
956 .SurfaceType = SURFTYPE_2D,
957 .SurfaceFormat = D32_FLOAT,
958 .Width = fb->width - 1,
959 .Height = fb->height - 1,
960 .StencilWriteEnable = has_stencil);
961 }
962
963 /* Emit 3DSTATE_STENCIL_BUFFER */
964 if (has_stencil) {
965 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
966 #if GEN_GEN >= 8 || GEN_IS_HASWELL
967 .StencilBufferEnable = true,
968 #endif
969 .StencilBufferObjectControlState = GENX(MOCS),
970
971 /* Stencil buffers have strange pitch. The PRM says:
972 *
973 * The pitch must be set to 2x the value computed based on width,
974 * as the stencil buffer is stored with two rows interleaved.
975 */
976 .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
977
978 #if GEN_GEN >= 8
979 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
980 #endif
981 .SurfaceBaseAddress = {
982 .bo = image->bo,
983 .offset = image->offset + image->stencil_surface.offset,
984 });
985 } else {
986 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
987 }
988
989 /* Disable hierarchical depth buffers. */
990 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
991
992 /* Clear the clear params. */
993 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
994 }
995
996 /**
997 * @see anv_cmd_buffer_set_subpass()
998 */
999 void
1000 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1001 struct anv_subpass *subpass)
1002 {
1003 cmd_buffer->state.subpass = subpass;
1004
1005 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1006
1007 cmd_buffer_emit_depth_stencil(cmd_buffer);
1008 }
1009
1010 void genX(CmdBeginRenderPass)(
1011 VkCommandBuffer commandBuffer,
1012 const VkRenderPassBeginInfo* pRenderPassBegin,
1013 VkSubpassContents contents)
1014 {
1015 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1016 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1017 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1018
1019 cmd_buffer->state.framebuffer = framebuffer;
1020 cmd_buffer->state.pass = pass;
1021 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1022
1023 genX(flush_pipeline_select_3d)(cmd_buffer);
1024
1025 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1026
1027 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
1028 .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
1029 .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
1030 .ClippedDrawingRectangleYMax =
1031 render_area->offset.y + render_area->extent.height - 1,
1032 .ClippedDrawingRectangleXMax =
1033 render_area->offset.x + render_area->extent.width - 1,
1034 .DrawingRectangleOriginY = 0,
1035 .DrawingRectangleOriginX = 0);
1036
1037 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1038 anv_cmd_buffer_clear_subpass(cmd_buffer);
1039 }
1040
1041 void genX(CmdNextSubpass)(
1042 VkCommandBuffer commandBuffer,
1043 VkSubpassContents contents)
1044 {
1045 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1046
1047 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1048
1049 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1050 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1051 anv_cmd_buffer_clear_subpass(cmd_buffer);
1052 }
1053
1054 void genX(CmdEndRenderPass)(
1055 VkCommandBuffer commandBuffer)
1056 {
1057 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1058
1059 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1060 }
1061
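/* Write the pipelined PS depth count (samples that passed the depth test)
 * to the given BO; used to snapshot occlusion query begin/end values.
 */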
1062 static void
1063 emit_ps_depth_count(struct anv_batch *batch,
1064 struct anv_bo *bo, uint32_t offset)
1065 {
1066 anv_batch_emit(batch, GENX(PIPE_CONTROL),
1067 .DestinationAddressType = DAT_PPGTT,
1068 .PostSyncOperation = WritePSDepthCount,
1069 .DepthStallEnable = true,
1070 .Address = { bo, offset });
1071 }
1072
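/* Mark a query slot as available by writing an immediate 1 once prior work
 * has completed past the PIPE_CONTROL.
 */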
1073 static void
1074 emit_query_availability(struct anv_batch *batch,
1075 struct anv_bo *bo, uint32_t offset)
1076 {
1077 anv_batch_emit(batch, GENX(PIPE_CONTROL),
1078 .DestinationAddressType = DAT_PPGTT,
1079 .PostSyncOperation = WriteImmediateData,
1080 .Address = { bo, offset },
1081 .ImmediateData = 1);
1082 }
1083
1084 void genX(CmdBeginQuery)(
1085 VkCommandBuffer commandBuffer,
1086 VkQueryPool queryPool,
1087 uint32_t query,
1088 VkQueryControlFlags flags)
1089 {
1090 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1091 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1092
1093 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1094 * that the pipelining of the depth write breaks. What we see is that
1095 * samples from the render pass clear leak into the first query
1096 * immediately after the clear. Doing a pipecontrol with a post-sync
1097 * operation and DepthStallEnable seems to work around the issue.
1098 */
1099 if (cmd_buffer->state.need_query_wa) {
1100 cmd_buffer->state.need_query_wa = false;
1101 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1102 .DepthCacheFlushEnable = true,
1103 .DepthStallEnable = true);
1104 }
1105
1106 switch (pool->type) {
1107 case VK_QUERY_TYPE_OCCLUSION:
1108 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1109 query * sizeof(struct anv_query_pool_slot));
1110 break;
1111
1112 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1113 default:
1114 unreachable("");
1115 }
1116 }
1117
1118 void genX(CmdEndQuery)(
1119 VkCommandBuffer commandBuffer,
1120 VkQueryPool queryPool,
1121 uint32_t query)
1122 {
1123 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1124 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1125
1126 switch (pool->type) {
1127 case VK_QUERY_TYPE_OCCLUSION:
1128 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1129 query * sizeof(struct anv_query_pool_slot) + 8);
1130
1131 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1132 query * sizeof(struct anv_query_pool_slot) + 16);
1133 break;
1134
1135 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1136 default:
1137 unreachable("");
1138 }
1139 }
1140
1141 #define TIMESTAMP 0x2358
1142
1143 void genX(CmdWriteTimestamp)(
1144 VkCommandBuffer commandBuffer,
1145 VkPipelineStageFlagBits pipelineStage,
1146 VkQueryPool queryPool,
1147 uint32_t query)
1148 {
1149 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1150 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1151 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1152
1153 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1154
1155 switch (pipelineStage) {
1156 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1157 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1158 .RegisterAddress = TIMESTAMP,
1159 .MemoryAddress = { &pool->bo, offset });
1160 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1161 .RegisterAddress = TIMESTAMP + 4,
1162 .MemoryAddress = { &pool->bo, offset + 4 });
1163 break;
1164
1165 default:
1166 /* Everything else is bottom-of-pipe */
1167 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1168 .DestinationAddressType = DAT_PPGTT,
1169 .PostSyncOperation = WriteTimestamp,
1170 .Address = { &pool->bo, offset });
1171 break;
1172 }
1173
1174 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1175 }
1176
1177 #if GEN_GEN > 7 || GEN_IS_HASWELL
1178
1179 #define alu_opcode(v) __gen_uint((v), 20, 31)
1180 #define alu_operand1(v) __gen_uint((v), 10, 19)
1181 #define alu_operand2(v) __gen_uint((v), 0, 9)
1182 #define alu(opcode, operand1, operand2) \
1183 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1184
1185 #define OPCODE_NOOP 0x000
1186 #define OPCODE_LOAD 0x080
1187 #define OPCODE_LOADINV 0x480
1188 #define OPCODE_LOAD0 0x081
1189 #define OPCODE_LOAD1 0x481
1190 #define OPCODE_ADD 0x100
1191 #define OPCODE_SUB 0x101
1192 #define OPCODE_AND 0x102
1193 #define OPCODE_OR 0x103
1194 #define OPCODE_XOR 0x104
1195 #define OPCODE_STORE 0x180
1196 #define OPCODE_STOREINV 0x580
1197
1198 #define OPERAND_R0 0x00
1199 #define OPERAND_R1 0x01
1200 #define OPERAND_R2 0x02
1201 #define OPERAND_R3 0x03
1202 #define OPERAND_R4 0x04
1203 #define OPERAND_SRCA 0x20
1204 #define OPERAND_SRCB 0x21
1205 #define OPERAND_ACCU 0x31
1206 #define OPERAND_ZF 0x32
1207 #define OPERAND_CF 0x33
1208
1209 #define CS_GPR(n) (0x2600 + (n) * 8)
1210
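/* CS_GPR(n) above addresses the command streamer's 64-bit general-purpose
 * registers used by MI_MATH. This helper loads one of them from memory as
 * two 32-bit halves.
 */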
1211 static void
1212 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1213 struct anv_bo *bo, uint32_t offset)
1214 {
1215 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1216 .RegisterAddress = reg,
1217 .MemoryAddress = { bo, offset });
1218 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1219 .RegisterAddress = reg + 4,
1220 .MemoryAddress = { bo, offset + 4 });
1221 }
1222
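/* Store a query result held in a CS GPR to the destination buffer, writing
 * 64 bits when VK_QUERY_RESULT_64_BIT is set and 32 bits otherwise.
 */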
1223 static void
1224 store_query_result(struct anv_batch *batch, uint32_t reg,
1225 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1226 {
1227 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1228 .RegisterAddress = reg,
1229 .MemoryAddress = { bo, offset });
1230
1231 if (flags & VK_QUERY_RESULT_64_BIT)
1232 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1233 .RegisterAddress = reg + 4,
1234 .MemoryAddress = { bo, offset + 4 });
1235 }
1236
1237 void genX(CmdCopyQueryPoolResults)(
1238 VkCommandBuffer commandBuffer,
1239 VkQueryPool queryPool,
1240 uint32_t firstQuery,
1241 uint32_t queryCount,
1242 VkBuffer destBuffer,
1243 VkDeviceSize destOffset,
1244 VkDeviceSize destStride,
1245 VkQueryResultFlags flags)
1246 {
1247 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1248 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1249 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1250 uint32_t slot_offset, dst_offset;
1251
1252 if (flags & VK_QUERY_RESULT_WAIT_BIT)
1253 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1254 .CommandStreamerStallEnable = true,
1255 .StallAtPixelScoreboard = true);
1256
1257 dst_offset = buffer->offset + destOffset;
1258 for (uint32_t i = 0; i < queryCount; i++) {
1259
1260 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1261 switch (pool->type) {
1262 case VK_QUERY_TYPE_OCCLUSION:
1263 emit_load_alu_reg_u64(&cmd_buffer->batch,
1264 CS_GPR(0), &pool->bo, slot_offset);
1265 emit_load_alu_reg_u64(&cmd_buffer->batch,
1266 CS_GPR(1), &pool->bo, slot_offset + 8);
1267
1268 /* FIXME: We need to clamp the result for 32 bit. */
1269
1270 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1271 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1272 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1273 dw[3] = alu(OPCODE_SUB, 0, 0);
1274 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1275 break;
1276
1277 case VK_QUERY_TYPE_TIMESTAMP:
1278 emit_load_alu_reg_u64(&cmd_buffer->batch,
1279 CS_GPR(2), &pool->bo, slot_offset);
1280 break;
1281
1282 default:
1283 unreachable("unhandled query type");
1284 }
1285
1286 store_query_result(&cmd_buffer->batch,
1287 CS_GPR(2), buffer->bo, dst_offset, flags);
1288
1289 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1290 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1291 &pool->bo, slot_offset + 16);
1292 if (flags & VK_QUERY_RESULT_64_BIT)
1293 store_query_result(&cmd_buffer->batch,
1294 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1295 else
1296 store_query_result(&cmd_buffer->batch,
1297 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1298 }
1299
1300 dst_offset += destStride;
1301 }
1302 }
1303
1304 #endif