anv/cmd_buffer: Use the new emit macro for 3DSTATE_CONSTANT
src/intel/vulkan/genX_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
53 pc.RenderTargetCacheFlushEnable = true;
54 }
55 #endif
56
57 anv_batch_emit_blk(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
58 sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
59 sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
60 sba.GeneralStateBaseAddressModifyEnable = true;
61
62 sba.SurfaceStateBaseAddress =
63 anv_cmd_buffer_surface_base_address(cmd_buffer);
64 sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
65 sba.SurfaceStateBaseAddressModifyEnable = true;
66
67 sba.DynamicStateBaseAddress =
68 (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
69 sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
70 sba.DynamicStateBaseAddressModifyEnable = true;
71
72 sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
73 sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
74 sba.IndirectObjectBaseAddressModifyEnable = true;
75
76 sba.InstructionBaseAddress =
77 (struct anv_address) { &device->instruction_block_pool.bo, 0 };
78 sba.InstructionMemoryObjectControlState = GENX(MOCS);
79 sba.InstructionBaseAddressModifyEnable = true;
80
81 # if (GEN_GEN >= 8)
82 /* Broadwell requires that we specify a buffer size for a bunch of
83 * these fields. However, since we will be growing the BOs live, we
84 * just set them all to the maximum.
85 */
86 sba.GeneralStateBufferSize = 0xfffff;
87 sba.GeneralStateBufferSizeModifyEnable = true;
88 sba.DynamicStateBufferSize = 0xfffff;
89 sba.DynamicStateBufferSizeModifyEnable = true;
90 sba.IndirectObjectBufferSize = 0xfffff;
91 sba.IndirectObjectBufferSizeModifyEnable = true;
92 sba.InstructionBufferSize = 0xfffff;
93 sba.InstructionBuffersizeModifyEnable = true;
94 # endif
95 }
96
97 /* After re-setting the surface state base address, we have to do some
98 * cache flushing so that the sampler engine will pick up the new
99 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
100 * Shared Function > 3D Sampler > State > State Caching (page 96):
101 *
102 * Coherency with system memory in the state cache, like the texture
103 * cache is handled partially by software. It is expected that the
104 * command stream or shader will issue Cache Flush operation or
105 * Cache_Flush sampler message to ensure that the L1 cache remains
106 * coherent with system memory.
107 *
108 * [...]
109 *
110 * Whenever the value of the Dynamic_State_Base_Addr,
111 * Surface_State_Base_Addr are altered, the L1 state cache must be
112 * invalidated to ensure the new surface or sampler state is fetched
113 * from system memory.
114 *
115 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
116 * which, according to the PIPE_CONTROL instruction documentation in the
117 * Broadwell PRM:
118 *
119 * Setting this bit is independent of any other bit in this packet.
120 * This bit controls the invalidation of the L1 and L2 state caches
121 * at the top of the pipe i.e. at the parsing time.
122 *
123 * Unfortunately, experimentation seems to indicate that state cache
124 * invalidation through a PIPE_CONTROL does nothing whatsoever in
125 * regards to surface state and binding tables. Instead, it seems that
126 * invalidating the texture cache is what is actually needed.
127 *
128 * XXX: As far as we have been able to determine through
129 * experimentation, flushing the texture cache appears to be
130 * sufficient. The theory here is that all of the sampling/rendering
131 * units cache the binding table in the texture cache. However, we have
132 * yet to be able to actually confirm this.
133 */
134 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
135 pc.TextureCacheInvalidationEnable = true;
136 }
137 }
138
139 void genX(CmdPipelineBarrier)(
140 VkCommandBuffer commandBuffer,
141 VkPipelineStageFlags srcStageMask,
142 VkPipelineStageFlags destStageMask,
143 VkBool32 byRegion,
144 uint32_t memoryBarrierCount,
145 const VkMemoryBarrier* pMemoryBarriers,
146 uint32_t bufferMemoryBarrierCount,
147 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
148 uint32_t imageMemoryBarrierCount,
149 const VkImageMemoryBarrier* pImageMemoryBarriers)
150 {
151 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
152 uint32_t b, *dw;
153
154 /* XXX: Right now, we're really dumb and just flush whatever categories
155 * the app asks for. One of these days we may make this a bit better
156 * but right now that's all the hardware allows for in most areas.
157 */
158 VkAccessFlags src_flags = 0;
159 VkAccessFlags dst_flags = 0;
160
161 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
162 src_flags |= pMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
167 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
169 }
170
171 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
172 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
173 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
174 }
175
176 /* Mask out the Source access flags we care about */
177 const uint32_t src_mask =
178 VK_ACCESS_SHADER_WRITE_BIT |
179 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
180 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
181 VK_ACCESS_TRANSFER_WRITE_BIT;
182
183 src_flags = src_flags & src_mask;
184
185 /* Mask out the destination access flags we care about */
186 const uint32_t dst_mask =
187 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
188 VK_ACCESS_INDEX_READ_BIT |
189 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
190 VK_ACCESS_UNIFORM_READ_BIT |
191 VK_ACCESS_SHADER_READ_BIT |
192 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
193 VK_ACCESS_TRANSFER_READ_BIT;
194
195 dst_flags = dst_flags & dst_mask;
196
197 /* The src flags represent how things were used previously. This is
198 * what we use for doing flushes.
199 */
200 struct GENX(PIPE_CONTROL) flush_cmd = {
201 GENX(PIPE_CONTROL_header),
202 .PostSyncOperation = NoWrite,
203 };
204
205 for_each_bit(b, src_flags) {
206 switch ((VkAccessFlagBits)(1 << b)) {
207 case VK_ACCESS_SHADER_WRITE_BIT:
208 flush_cmd.DCFlushEnable = true;
209 break;
210 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
211 flush_cmd.RenderTargetCacheFlushEnable = true;
212 break;
213 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
214 flush_cmd.DepthCacheFlushEnable = true;
215 break;
216 case VK_ACCESS_TRANSFER_WRITE_BIT:
217 flush_cmd.RenderTargetCacheFlushEnable = true;
218 flush_cmd.DepthCacheFlushEnable = true;
219 break;
220 default:
221 unreachable("should've masked this out by now");
222 }
223 }
224
225 /* If we end up doing two PIPE_CONTROLs, the first, flushing one also has to
226 * stall and wait for the flushing to finish, so we don't re-dirty the
227 * caches with in-flight rendering after the second PIPE_CONTROL
228 * invalidates.
229 */
230
231 if (dst_flags)
232 flush_cmd.CommandStreamerStallEnable = true;
233
234 if (src_flags && dst_flags) {
235 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
236 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
237 }
238
239 /* The dst flags represent how things will be used in the future. This
240 * is what we use for doing cache invalidations.
241 */
242 struct GENX(PIPE_CONTROL) invalidate_cmd = {
243 GENX(PIPE_CONTROL_header),
244 .PostSyncOperation = NoWrite,
245 };
246
247 for_each_bit(b, dst_flags) {
248 switch ((VkAccessFlagBits)(1 << b)) {
249 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
250 case VK_ACCESS_INDEX_READ_BIT:
251 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
252 invalidate_cmd.VFCacheInvalidationEnable = true;
253 break;
254 case VK_ACCESS_UNIFORM_READ_BIT:
255 invalidate_cmd.ConstantCacheInvalidationEnable = true;
256 /* fallthrough */
257 case VK_ACCESS_SHADER_READ_BIT:
258 invalidate_cmd.TextureCacheInvalidationEnable = true;
259 break;
260 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
261 invalidate_cmd.TextureCacheInvalidationEnable = true;
262 break;
263 case VK_ACCESS_TRANSFER_READ_BIT:
264 invalidate_cmd.TextureCacheInvalidationEnable = true;
265 break;
266 default:
267 unreachable("should've masked this out by now");
268 }
269 }
270
271 if (dst_flags) {
272 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
273 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
274 }
275 }
276
277 static uint32_t
278 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
279 {
280 static const uint32_t push_constant_opcodes[] = {
281 [MESA_SHADER_VERTEX] = 21,
282 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
283 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
284 [MESA_SHADER_GEOMETRY] = 22,
285 [MESA_SHADER_FRAGMENT] = 23,
286 [MESA_SHADER_COMPUTE] = 0,
287 };
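/* All 3DSTATE_CONSTANT_* packets use the same layout as 3DSTATE_CONSTANT_VS
 * and differ only in their 3D command sub-opcode (the table above), so the
 * code below packs a 3DSTATE_CONSTANT_VS template and simply overrides
 * _3DCommandSubOpcode for the stage being flushed.
 */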
288
289 VkShaderStageFlags flushed = 0;
290
291 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
292 if (stage == MESA_SHADER_COMPUTE)
293 continue;
294
295 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
296
297 if (state.offset == 0) {
298 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
299 c._3DCommandSubOpcode = push_constant_opcodes[stage];
300 } else {
301 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
302 c._3DCommandSubOpcode = push_constant_opcodes[stage];
303 c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
304 #if GEN_GEN >= 9
305 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
306 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
307 #else
308 .PointerToConstantBuffer0 = { .offset = state.offset },
309 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
310 #endif
311 };
312 }
313 }
314
315 flushed |= mesa_to_vk_shader_stage(stage);
316 }
317
318 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
319
320 return flushed;
321 }
322
323 void
324 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
325 {
326 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
327 uint32_t *p;
328
329 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
330
331 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
332
333 #if GEN_GEN >= 8
334 /* FIXME (jason): Currently, the config_l3 function causes problems on
335 * Haswell and prior if you have a kernel older than 4.4. In order to
336 * work, it requires a couple of registers be white-listed in the
337 * command parser and they weren't added until 4.4. What we should do
338 * is check the command parser version and make it a no-op if your
339 * command parser is either off or too old. Compute won't work 100%,
340 * but at least 3D will. In the meantime, I'm going to make this
341 * gen8+ only so that we can get Haswell working again.
342 */
343 genX(cmd_buffer_config_l3)(cmd_buffer, false);
344 #endif
345
346 genX(flush_pipeline_select_3d)(cmd_buffer);
347
348 if (vb_emit) {
349 const uint32_t num_buffers = __builtin_popcount(vb_emit);
350 const uint32_t num_dwords = 1 + num_buffers * 4;
351
352 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
353 GENX(3DSTATE_VERTEX_BUFFERS));
354 uint32_t vb, i = 0;
355 for_each_bit(vb, vb_emit) {
356 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
357 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
358
359 struct GENX(VERTEX_BUFFER_STATE) state = {
360 .VertexBufferIndex = vb,
361
362 #if GEN_GEN >= 8
363 .MemoryObjectControlState = GENX(MOCS),
364 #else
365 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
366 .InstanceDataStepRate = 1,
367 .VertexBufferMemoryObjectControlState = GENX(MOCS),
368 #endif
369
370 .AddressModifyEnable = true,
371 .BufferPitch = pipeline->binding_stride[vb],
372 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
373
374 #if GEN_GEN >= 8
375 .BufferSize = buffer->size - offset
376 #else
377 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
378 #endif
379 };
380
381 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
382 i++;
383 }
384 }
385
386 cmd_buffer->state.vb_dirty &= ~vb_emit;
387
388 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
389 /* If somebody compiled a pipeline after starting a command buffer the
390 * scratch bo may have grown since we started this cmd buffer (and
391 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
392 * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
393 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
394 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
395
396 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
397
398 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
399 *
400 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
401 * the next 3DPRIMITIVE command after programming the
402 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
403 *
404 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
405 * pipeline setup, we need to dirty push constants.
406 */
407 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
408 }
409
410 #if GEN_GEN <= 7
411 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
412 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
413 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
414 *
415 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
416 * stall needs to be sent just prior to any 3DSTATE_VS,
417 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
418 * 3DSTATE_BINDING_TABLE_POINTER_VS,
419 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
420 * PIPE_CONTROL needs to be sent before any combination of VS
421 * associated 3DSTATE."
422 */
423 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
424 pc.DepthStallEnable = true;
425 pc.PostSyncOperation = WriteImmediateData;
426 pc.Address =
427 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
428 }
429 }
430 #endif
431
432 /* We emit the binding tables and sampler tables first, then emit push
433 * constants and then finally emit binding table and sampler table
434 * pointers. It has to happen in this order, since emitting the binding
435 * tables may change the push constants (in case of storage images). After
436 * emitting push constants, on SKL+ we have to emit the corresponding
437 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
438 */
439 uint32_t dirty = 0;
440 if (cmd_buffer->state.descriptors_dirty)
441 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
442
443 if (cmd_buffer->state.push_constants_dirty) {
444 #if GEN_GEN >= 9
445 /* On Skylake and later, the binding table pointers commands are
446 * what actually flush the changes to push constant state so we need
447 * to dirty them so they get re-emitted below.
448 */
449 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
450 #else
451 cmd_buffer_flush_push_constants(cmd_buffer);
452 #endif
453 }
454
455 if (dirty)
456 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
457
458 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
459 gen8_cmd_buffer_emit_viewport(cmd_buffer);
460
461 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
462 gen7_cmd_buffer_emit_scissor(cmd_buffer);
463
464 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
465 }
466
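/* Writes the base vertex and base instance as a tiny vertex buffer bound at
 * the reserved vertex buffer index 32, so that shaders which need them
 * (vs_prog_data->uses_basevertex / uses_baseinstance) can fetch the values,
 * presumably through a dedicated vertex element set up at pipeline creation.
 */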
467 static void
468 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
469 struct anv_bo *bo, uint32_t offset)
470 {
471 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
472 GENX(3DSTATE_VERTEX_BUFFERS));
473
474 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
475 &(struct GENX(VERTEX_BUFFER_STATE)) {
476 .VertexBufferIndex = 32, /* Reserved for this */
477 .AddressModifyEnable = true,
478 .BufferPitch = 0,
479 #if (GEN_GEN >= 8)
480 .MemoryObjectControlState = GENX(MOCS),
481 .BufferStartingAddress = { bo, offset },
482 .BufferSize = 8
483 #else
484 .VertexBufferMemoryObjectControlState = GENX(MOCS),
485 .BufferStartingAddress = { bo, offset },
486 .EndAddress = { bo, offset + 8 },
487 #endif
488 });
489 }
490
491 static void
492 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
493 uint32_t base_vertex, uint32_t base_instance)
494 {
495 struct anv_state id_state =
496 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
497
498 ((uint32_t *)id_state.map)[0] = base_vertex;
499 ((uint32_t *)id_state.map)[1] = base_instance;
500
501 if (!cmd_buffer->device->info.has_llc)
502 anv_state_clflush(id_state);
503
504 emit_base_vertex_instance_bo(cmd_buffer,
505 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
506 }
507
508 void genX(CmdDraw)(
509 VkCommandBuffer commandBuffer,
510 uint32_t vertexCount,
511 uint32_t instanceCount,
512 uint32_t firstVertex,
513 uint32_t firstInstance)
514 {
515 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
516 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
517 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
518
519 genX(cmd_buffer_flush_state)(cmd_buffer);
520
521 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
522 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
523
524 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
525 prim.VertexAccessType = SEQUENTIAL;
526 prim.PrimitiveTopologyType = pipeline->topology;
527 prim.VertexCountPerInstance = vertexCount;
528 prim.StartVertexLocation = firstVertex;
529 prim.InstanceCount = instanceCount;
530 prim.StartInstanceLocation = firstInstance;
531 prim.BaseVertexLocation = 0;
532 }
533 }
534
535 void genX(CmdDrawIndexed)(
536 VkCommandBuffer commandBuffer,
537 uint32_t indexCount,
538 uint32_t instanceCount,
539 uint32_t firstIndex,
540 int32_t vertexOffset,
541 uint32_t firstInstance)
542 {
543 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
544 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
545 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
546
547 genX(cmd_buffer_flush_state)(cmd_buffer);
548
549 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
550 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
551
552 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
553 prim.VertexAccessType = RANDOM;
554 prim.PrimitiveTopologyType = pipeline->topology;
555 prim.VertexCountPerInstance = indexCount;
556 prim.StartVertexLocation = firstIndex;
557 prim.InstanceCount = instanceCount;
558 prim.StartInstanceLocation = firstInstance;
559 prim.BaseVertexLocation = vertexOffset;
560 }
561 }
562
563 /* Auto-Draw / Indirect Registers */
564 #define GEN7_3DPRIM_END_OFFSET 0x2420
565 #define GEN7_3DPRIM_START_VERTEX 0x2430
566 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
567 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
568 #define GEN7_3DPRIM_START_INSTANCE 0x243C
569 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
570
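/* emit_lrm() loads a dword into an MMIO register from a buffer object using
 * MI_LOAD_REGISTER_MEM; emit_lri() loads an immediate using
 * MI_LOAD_REGISTER_IMM. They are used below to program the 3DPRIM_* and
 * GPGPU dispatch registers for indirect draws and dispatches.
 */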
571 static void
572 emit_lrm(struct anv_batch *batch,
573 uint32_t reg, struct anv_bo *bo, uint32_t offset)
574 {
575 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
576 .RegisterAddress = reg,
577 .MemoryAddress = { bo, offset });
578 }
579
580 static void
581 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
582 {
583 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
584 .RegisterOffset = reg,
585 .DataDWord = imm);
586 }
587
588 void genX(CmdDrawIndirect)(
589 VkCommandBuffer commandBuffer,
590 VkBuffer _buffer,
591 VkDeviceSize offset,
592 uint32_t drawCount,
593 uint32_t stride)
594 {
595 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
596 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
597 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
598 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
599 struct anv_bo *bo = buffer->bo;
600 uint32_t bo_offset = buffer->offset + offset;
601
602 genX(cmd_buffer_flush_state)(cmd_buffer);
603
604 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
605 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
606
607 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
608 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
609 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
610 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
611 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
612
613 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
614 prim.IndirectParameterEnable = true;
615 prim.VertexAccessType = SEQUENTIAL;
616 prim.PrimitiveTopologyType = pipeline->topology;
617 }
618 }
619
620 void genX(CmdDrawIndexedIndirect)(
621 VkCommandBuffer commandBuffer,
622 VkBuffer _buffer,
623 VkDeviceSize offset,
624 uint32_t drawCount,
625 uint32_t stride)
626 {
627 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
628 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
629 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
630 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
631 struct anv_bo *bo = buffer->bo;
632 uint32_t bo_offset = buffer->offset + offset;
633
634 genX(cmd_buffer_flush_state)(cmd_buffer);
635
636 /* TODO: We need to stomp base vertex to 0 somehow */
637 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
638 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
639
640 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
641 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
642 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
643 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
644 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
645
646 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
647 prim.IndirectParameterEnable = true;
648 prim.VertexAccessType = RANDOM;
649 prim.PrimitiveTopologyType = pipeline->topology;
650 }
651 }
652
653 #if GEN_GEN == 7
654
655 static bool
656 verify_cmd_parser(const struct anv_device *device,
657 int required_version,
658 const char *function)
659 {
660 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
661 vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
662 "cmd parser version %d is required for %s",
663 required_version, function);
664 return false;
665 } else {
666 return true;
667 }
668 }
669
670 #endif
671
672 void genX(CmdDispatch)(
673 VkCommandBuffer commandBuffer,
674 uint32_t x,
675 uint32_t y,
676 uint32_t z)
677 {
678 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
679 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
680 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
681
682 if (prog_data->uses_num_work_groups) {
683 struct anv_state state =
684 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
685 uint32_t *sizes = state.map;
686 sizes[0] = x;
687 sizes[1] = y;
688 sizes[2] = z;
689 if (!cmd_buffer->device->info.has_llc)
690 anv_state_clflush(state);
691 cmd_buffer->state.num_workgroups_offset = state.offset;
692 cmd_buffer->state.num_workgroups_bo =
693 &cmd_buffer->device->dynamic_state_block_pool.bo;
694 }
695
696 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
697
698 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
699 .SIMDSize = prog_data->simd_size / 16,
700 .ThreadDepthCounterMaximum = 0,
701 .ThreadHeightCounterMaximum = 0,
702 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
703 .ThreadGroupIDXDimension = x,
704 .ThreadGroupIDYDimension = y,
705 .ThreadGroupIDZDimension = z,
706 .RightExecutionMask = pipeline->cs_right_mask,
707 .BottomExecutionMask = 0xffffffff);
708
709 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
710 }
711
712 #define GPGPU_DISPATCHDIMX 0x2500
713 #define GPGPU_DISPATCHDIMY 0x2504
714 #define GPGPU_DISPATCHDIMZ 0x2508
715
716 #define MI_PREDICATE_SRC0 0x2400
717 #define MI_PREDICATE_SRC1 0x2408
718
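/* The GPGPU_DISPATCHDIM* registers are read by GPGPU_WALKER when
 * IndirectParameterEnable is set. MI_PREDICATE_SRC0/SRC1 feed the
 * MI_PREDICATE commands emitted below (gen7 only), which build a predicate
 * that skips the walker whenever any indirect dispatch dimension is zero.
 */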
719 void genX(CmdDispatchIndirect)(
720 VkCommandBuffer commandBuffer,
721 VkBuffer _buffer,
722 VkDeviceSize offset)
723 {
724 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
725 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
726 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
727 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
728 struct anv_bo *bo = buffer->bo;
729 uint32_t bo_offset = buffer->offset + offset;
730 struct anv_batch *batch = &cmd_buffer->batch;
731
732 #if GEN_GEN == 7
733 /* Linux 4.4 added command parser version 5 which allows the GPGPU
734 * indirect dispatch registers to be written.
735 */
736 if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
737 return;
738 #endif
739
740 if (prog_data->uses_num_work_groups) {
741 cmd_buffer->state.num_workgroups_offset = bo_offset;
742 cmd_buffer->state.num_workgroups_bo = bo;
743 }
744
745 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
746
747 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
748 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
749 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
750
751 #if GEN_GEN <= 7
752 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
753 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
754 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
755 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
756
757 /* Load compute_dispatch_indirect_x_size into SRC0 */
758 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
759
760 /* predicate = (compute_dispatch_indirect_x_size == 0); */
761 anv_batch_emit(batch, GENX(MI_PREDICATE),
762 .LoadOperation = LOAD_LOAD,
763 .CombineOperation = COMBINE_SET,
764 .CompareOperation = COMPARE_SRCS_EQUAL);
765
766 /* Load compute_dispatch_indirect_y_size into SRC0 */
767 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
768
769 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
770 anv_batch_emit(batch, GENX(MI_PREDICATE),
771 .LoadOperation = LOAD_LOAD,
772 .CombineOperation = COMBINE_OR,
773 .CompareOperation = COMPARE_SRCS_EQUAL);
774
775 /* Load compute_dispatch_indirect_z_size into SRC0 */
776 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
777
778 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
779 anv_batch_emit(batch, GENX(MI_PREDICATE),
780 .LoadOperation = LOAD_LOAD,
781 .CombineOperation = COMBINE_OR,
782 .CompareOperation = COMPARE_SRCS_EQUAL);
783
784 /* predicate = !predicate; */
785 #define COMPARE_FALSE 1
786 anv_batch_emit(batch, GENX(MI_PREDICATE),
787 .LoadOperation = LOAD_LOADINV,
788 .CombineOperation = COMBINE_OR,
789 .CompareOperation = COMPARE_FALSE);
790 #endif
791
792 anv_batch_emit(batch, GENX(GPGPU_WALKER),
793 .IndirectParameterEnable = true,
794 .PredicateEnable = GEN_GEN <= 7,
795 .SIMDSize = prog_data->simd_size / 16,
796 .ThreadDepthCounterMaximum = 0,
797 .ThreadHeightCounterMaximum = 0,
798 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
799 .RightExecutionMask = pipeline->cs_right_mask,
800 .BottomExecutionMask = 0xffffffff);
801
802 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
803 }
804
805 static void
806 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
807 uint32_t pipeline)
808 {
809 #if GEN_GEN >= 8 && GEN_GEN < 10
810 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
811 *
812 * Software must clear the COLOR_CALC_STATE Valid field in
813 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
814 * with Pipeline Select set to GPGPU.
815 *
816 * The internal hardware docs recommend the same workaround for Gen9
817 * hardware too.
818 */
819 if (pipeline == GPGPU)
820 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS));
821 #elif GEN_GEN <= 7
822 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
823 * PIPELINE_SELECT [DevBWR+]":
824 *
825 * Project: DEVSNB+
826 *
827 * Software must ensure all the write caches are flushed through a
828 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
829 * command to invalidate read only caches prior to programming
830 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
831 */
832 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
833 pc.RenderTargetCacheFlushEnable = true;
834 pc.DepthCacheFlushEnable = true;
835 pc.DCFlushEnable = true;
836 pc.PostSyncOperation = NoWrite;
837 pc.CommandStreamerStallEnable = true;
838 }
839
840 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
841 pc.TextureCacheInvalidationEnable = true;
842 pc.ConstantCacheInvalidationEnable = true;
843 pc.StateCacheInvalidationEnable = true;
844 pc.InstructionCacheInvalidateEnable = true;
845 pc.PostSyncOperation = NoWrite;
846 }
847 #endif
848 }
849
850 void
851 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
852 {
853 if (cmd_buffer->state.current_pipeline != _3D) {
854 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
855
856 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
857 #if GEN_GEN >= 9
858 .MaskBits = 3,
859 #endif
860 .PipelineSelection = _3D);
861 cmd_buffer->state.current_pipeline = _3D;
862 }
863 }
864
865 void
866 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
867 {
868 if (cmd_buffer->state.current_pipeline != GPGPU) {
869 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
870
871 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
872 #if GEN_GEN >= 9
873 .MaskBits = 3,
874 #endif
875 .PipelineSelection = GPGPU);
876 cmd_buffer->state.current_pipeline = GPGPU;
877 }
878 }
879
880 struct anv_state
881 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
882 struct anv_framebuffer *fb)
883 {
884 struct anv_state state =
885 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
886
887 struct GENX(RENDER_SURFACE_STATE) null_ss = {
888 .SurfaceType = SURFTYPE_NULL,
889 .SurfaceArray = fb->layers > 0,
890 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
891 #if GEN_GEN >= 8
892 .TileMode = YMAJOR,
893 #else
894 .TiledSurface = true,
895 #endif
896 .Width = fb->width - 1,
897 .Height = fb->height - 1,
898 .Depth = fb->layers - 1,
899 .RenderTargetViewExtent = fb->layers - 1,
900 };
901
902 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
903
904 if (!cmd_buffer->device->info.has_llc)
905 anv_state_clflush(state);
906
907 return state;
908 }
909
910 static void
911 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
912 {
913 struct anv_device *device = cmd_buffer->device;
914 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
915 const struct anv_image_view *iview =
916 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
917 const struct anv_image *image = iview ? iview->image : NULL;
918 const struct anv_format *anv_format =
919 iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
920 const bool has_depth = iview && anv_format->has_depth;
921 const bool has_stencil = iview && anv_format->has_stencil;
922
923 /* FIXME: Implement the PMA stall W/A */
924 /* FIXME: Width and Height are wrong */
925
926 /* Emit 3DSTATE_DEPTH_BUFFER */
927 if (has_depth) {
928 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
929 db.SurfaceType = SURFTYPE_2D;
930 db.DepthWriteEnable = true;
931 db.StencilWriteEnable = has_stencil;
932 db.HierarchicalDepthBufferEnable = false;
933
934 db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
935 &image->depth_surface.isl);
936
937 db.SurfaceBaseAddress = (struct anv_address) {
938 .bo = image->bo,
939 .offset = image->offset + image->depth_surface.offset,
940 };
941 db.DepthBufferObjectControlState = GENX(MOCS);
942
943 db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
944 db.Height = fb->height - 1;
945 db.Width = fb->width - 1;
946 db.LOD = 0;
947 db.Depth = 1 - 1;
948 db.MinimumArrayElement = 0;
949
950 #if GEN_GEN >= 8
951 db.SurfaceQPitch =
952 isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
953 #endif
954 db.RenderTargetViewExtent = 1 - 1;
955 }
956 } else {
957 /* Even when no depth buffer is present, the hardware requires that
958 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
959 *
960 * If a null depth buffer is bound, the driver must instead bind depth as:
961 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
962 * 3DSTATE_DEPTH.Width = 1
963 * 3DSTATE_DEPTH.Height = 1
964 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
965 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
966 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
967 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
968 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
969 *
970 * The PRM is wrong, though. The width and height must be programmed to
971 * the actual framebuffer's width and height, even when neither depth buffer
972 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
973 * be combined with a stencil buffer so we use D32_FLOAT instead.
974 */
975 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
976 db.SurfaceType = SURFTYPE_2D;
977 db.SurfaceFormat = D32_FLOAT;
978 db.Width = fb->width - 1;
979 db.Height = fb->height - 1;
980 db.StencilWriteEnable = has_stencil;
981 }
982 }
983
984 /* Emit 3DSTATE_STENCIL_BUFFER */
985 if (has_stencil) {
986 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
987 #if GEN_GEN >= 8 || GEN_IS_HASWELL
988 sb.StencilBufferEnable = true;
989 #endif
990 sb.StencilBufferObjectControlState = GENX(MOCS);
991
992 /* Stencil buffers have strange pitch. The PRM says:
993 *
994 * The pitch must be set to 2x the value computed based on width,
995 * as the stencil buffer is stored with two rows interleaved.
996 */
997 sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1;
998
999 #if GEN_GEN >= 8
1000 sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
1001 #endif
1002 sb.SurfaceBaseAddress = (struct anv_address) {
1003 .bo = image->bo,
1004 .offset = image->offset + image->stencil_surface.offset,
1005 };
1006 }
1007 } else {
1008 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
1009 }
1010
1011 /* Disable hierarchical depth buffers. */
1012 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
1013
1014 /* Clear the clear params. */
1015 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
1016 }
1017
1018 /**
1019 * @see anv_cmd_buffer_set_subpass()
1020 */
1021 void
1022 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1023 struct anv_subpass *subpass)
1024 {
1025 cmd_buffer->state.subpass = subpass;
1026
1027 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1028
1029 cmd_buffer_emit_depth_stencil(cmd_buffer);
1030 }
1031
1032 void genX(CmdBeginRenderPass)(
1033 VkCommandBuffer commandBuffer,
1034 const VkRenderPassBeginInfo* pRenderPassBegin,
1035 VkSubpassContents contents)
1036 {
1037 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1038 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1039 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1040
1041 cmd_buffer->state.framebuffer = framebuffer;
1042 cmd_buffer->state.pass = pass;
1043 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1044
1045 genX(flush_pipeline_select_3d)(cmd_buffer);
1046
1047 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1048
1049 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
1050 .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
1051 .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
1052 .ClippedDrawingRectangleYMax =
1053 render_area->offset.y + render_area->extent.height - 1,
1054 .ClippedDrawingRectangleXMax =
1055 render_area->offset.x + render_area->extent.width - 1,
1056 .DrawingRectangleOriginY = 0,
1057 .DrawingRectangleOriginX = 0);
1058
1059 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1060 anv_cmd_buffer_clear_subpass(cmd_buffer);
1061 }
1062
1063 void genX(CmdNextSubpass)(
1064 VkCommandBuffer commandBuffer,
1065 VkSubpassContents contents)
1066 {
1067 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1068
1069 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1070
1071 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1072 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1073 anv_cmd_buffer_clear_subpass(cmd_buffer);
1074 }
1075
1076 void genX(CmdEndRenderPass)(
1077 VkCommandBuffer commandBuffer)
1078 {
1079 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1080
1081 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1082 }
1083
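/* Occlusion queries are implemented with PIPE_CONTROL post-sync writes: a
 * PS_DEPTH_COUNT snapshot is written at the start and end of the query (the
 * result is the difference between the two), and an immediate write of 1
 * marks the query slot as available.
 */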
1084 static void
1085 emit_ps_depth_count(struct anv_batch *batch,
1086 struct anv_bo *bo, uint32_t offset)
1087 {
1088 anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
1089 pc.DestinationAddressType = DAT_PPGTT;
1090 pc.PostSyncOperation = WritePSDepthCount;
1091 pc.DepthStallEnable = true;
1092 pc.Address = (struct anv_address) { bo, offset };
1093 }
1094 }
1095
1096 static void
1097 emit_query_availability(struct anv_batch *batch,
1098 struct anv_bo *bo, uint32_t offset)
1099 {
1100 anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
1101 pc.DestinationAddressType = DAT_PPGTT;
1102 pc.PostSyncOperation = WriteImmediateData;
1103 pc.Address = (struct anv_address) { bo, offset };
1104 pc.ImmediateData = 1;
1105 }
1106 }
1107
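/* The offsets used below assume the anv_query_pool_slot layout from
 * anv_private.h: a 64-bit begin value at offset 0, a 64-bit end value at
 * offset 8, and a 64-bit availability word at offset 16.
 */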
1108 void genX(CmdBeginQuery)(
1109 VkCommandBuffer commandBuffer,
1110 VkQueryPool queryPool,
1111 uint32_t query,
1112 VkQueryControlFlags flags)
1113 {
1114 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1115 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1116
1117 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1118 * that the pipelining of the depth write breaks. What we see is that
1119 * samples from the render pass clear leak into the first query
1120 * immediately after the clear. Doing a pipecontrol with a post-sync
1121 * operation and DepthStallEnable seems to work around the issue.
1122 */
1123 if (cmd_buffer->state.need_query_wa) {
1124 cmd_buffer->state.need_query_wa = false;
1125 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1126 pc.DepthCacheFlushEnable = true;
1127 pc.DepthStallEnable = true;
1128 }
1129 }
1130
1131 switch (pool->type) {
1132 case VK_QUERY_TYPE_OCCLUSION:
1133 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1134 query * sizeof(struct anv_query_pool_slot));
1135 break;
1136
1137 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1138 default:
1139 unreachable("");
1140 }
1141 }
1142
1143 void genX(CmdEndQuery)(
1144 VkCommandBuffer commandBuffer,
1145 VkQueryPool queryPool,
1146 uint32_t query)
1147 {
1148 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1149 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1150
1151 switch (pool->type) {
1152 case VK_QUERY_TYPE_OCCLUSION:
1153 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1154 query * sizeof(struct anv_query_pool_slot) + 8);
1155
1156 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1157 query * sizeof(struct anv_query_pool_slot) + 16);
1158 break;
1159
1160 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1161 default:
1162 unreachable("");
1163 }
1164 }
1165
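/* MMIO offset of the render command streamer TIMESTAMP register, read with
 * MI_STORE_REGISTER_MEM for top-of-pipe timestamp queries below.
 */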
1166 #define TIMESTAMP 0x2358
1167
1168 void genX(CmdWriteTimestamp)(
1169 VkCommandBuffer commandBuffer,
1170 VkPipelineStageFlagBits pipelineStage,
1171 VkQueryPool queryPool,
1172 uint32_t query)
1173 {
1174 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1175 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1176 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1177
1178 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1179
1180 switch (pipelineStage) {
1181 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1182 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1183 .RegisterAddress = TIMESTAMP,
1184 .MemoryAddress = { &pool->bo, offset });
1185 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1186 .RegisterAddress = TIMESTAMP + 4,
1187 .MemoryAddress = { &pool->bo, offset + 4 });
1188 break;
1189
1190 default:
1191 /* Everything else is bottom-of-pipe */
1192 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1193 .DestinationAddressType = DAT_PPGTT,
1194 .PostSyncOperation = WriteTimestamp,
1195 .Address = { &pool->bo, offset });
1196 break;
1197 }
1198
1199 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1200 }
1201
1202 #if GEN_GEN > 7 || GEN_IS_HASWELL
1203
1204 #define alu_opcode(v) __gen_uint((v), 20, 31)
1205 #define alu_operand1(v) __gen_uint((v), 10, 19)
1206 #define alu_operand2(v) __gen_uint((v), 0, 9)
1207 #define alu(opcode, operand1, operand2) \
1208 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
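/* MI_MATH ALU instructions are encoded with the opcode in bits 31:20 and the
 * two operands in bits 19:10 and 9:0, matching the __gen_uint() shifts above.
 */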
1209
1210 #define OPCODE_NOOP 0x000
1211 #define OPCODE_LOAD 0x080
1212 #define OPCODE_LOADINV 0x480
1213 #define OPCODE_LOAD0 0x081
1214 #define OPCODE_LOAD1 0x481
1215 #define OPCODE_ADD 0x100
1216 #define OPCODE_SUB 0x101
1217 #define OPCODE_AND 0x102
1218 #define OPCODE_OR 0x103
1219 #define OPCODE_XOR 0x104
1220 #define OPCODE_STORE 0x180
1221 #define OPCODE_STOREINV 0x580
1222
1223 #define OPERAND_R0 0x00
1224 #define OPERAND_R1 0x01
1225 #define OPERAND_R2 0x02
1226 #define OPERAND_R3 0x03
1227 #define OPERAND_R4 0x04
1228 #define OPERAND_SRCA 0x20
1229 #define OPERAND_SRCB 0x21
1230 #define OPERAND_ACCU 0x31
1231 #define OPERAND_ZF 0x32
1232 #define OPERAND_CF 0x33
1233
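/* CS_GPR(n) is the MMIO offset of command streamer general purpose register
 * n; these 64-bit registers are the R0, R1, ... operands used by MI_MATH
 * below.
 */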
1234 #define CS_GPR(n) (0x2600 + (n) * 8)
1235
1236 static void
1237 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1238 struct anv_bo *bo, uint32_t offset)
1239 {
1240 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1241 .RegisterAddress = reg,
1242 .MemoryAddress = { bo, offset });
1243 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1244 .RegisterAddress = reg + 4,
1245 .MemoryAddress = { bo, offset + 4 });
1246 }
1247
1248 static void
1249 store_query_result(struct anv_batch *batch, uint32_t reg,
1250 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1251 {
1252 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1253 .RegisterAddress = reg,
1254 .MemoryAddress = { bo, offset });
1255
1256 if (flags & VK_QUERY_RESULT_64_BIT)
1257 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1258 .RegisterAddress = reg + 4,
1259 .MemoryAddress = { bo, offset + 4 });
1260 }
1261
1262 void genX(CmdCopyQueryPoolResults)(
1263 VkCommandBuffer commandBuffer,
1264 VkQueryPool queryPool,
1265 uint32_t firstQuery,
1266 uint32_t queryCount,
1267 VkBuffer destBuffer,
1268 VkDeviceSize destOffset,
1269 VkDeviceSize destStride,
1270 VkQueryResultFlags flags)
1271 {
1272 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1273 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1274 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1275 uint32_t slot_offset, dst_offset;
1276
1277 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1278 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1279 pc.CommandStreamerStallEnable = true;
1280 pc.StallAtPixelScoreboard = true;
1281 }
1282 }
1283
1284 dst_offset = buffer->offset + destOffset;
1285 for (uint32_t i = 0; i < queryCount; i++) {
1286
1287 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1288 switch (pool->type) {
1289 case VK_QUERY_TYPE_OCCLUSION:
1290 emit_load_alu_reg_u64(&cmd_buffer->batch,
1291 CS_GPR(0), &pool->bo, slot_offset);
1292 emit_load_alu_reg_u64(&cmd_buffer->batch,
1293 CS_GPR(1), &pool->bo, slot_offset + 8);
1294
1295 /* FIXME: We need to clamp the result for 32 bit. */
1296
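/* MI_MATH program: R2 = R1 - R0, i.e. the end PS_DEPTH_COUNT snapshot minus
 * the begin snapshot, which is the occlusion query result.
 */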
1297 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1298 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1299 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1300 dw[3] = alu(OPCODE_SUB, 0, 0);
1301 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1302 break;
1303
1304 case VK_QUERY_TYPE_TIMESTAMP:
1305 emit_load_alu_reg_u64(&cmd_buffer->batch,
1306 CS_GPR(2), &pool->bo, slot_offset);
1307 break;
1308
1309 default:
1310 unreachable("unhandled query type");
1311 }
1312
1313 store_query_result(&cmd_buffer->batch,
1314 CS_GPR(2), buffer->bo, dst_offset, flags);
1315
1316 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1317 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1318 &pool->bo, slot_offset + 16);
1319 if (flags & VK_QUERY_RESULT_64_BIT)
1320 store_query_result(&cmd_buffer->batch,
1321 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1322 else
1323 store_query_result(&cmd_buffer->batch,
1324 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1325 }
1326
1327 dst_offset += destStride;
1328 }
1329 }
1330
1331 #endif