1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
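/* Emit STATE_BASE_ADDRESS, pointing the general, surface, dynamic and
 * instruction state base addresses at the driver's block pools, along with
 * the cache flushes and invalidations required around it.
 */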
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
53 pc.RenderTargetCacheFlushEnable = true;
54 }
55 #endif
56
57 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
58 sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
59 sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
60 sba.GeneralStateBaseAddressModifyEnable = true;
61
62 sba.SurfaceStateBaseAddress =
63 anv_cmd_buffer_surface_base_address(cmd_buffer);
64 sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
65 sba.SurfaceStateBaseAddressModifyEnable = true;
66
67 sba.DynamicStateBaseAddress =
68 (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
69 sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
70 sba.DynamicStateBaseAddressModifyEnable = true;
71
72 sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
73 sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
74 sba.IndirectObjectBaseAddressModifyEnable = true;
75
76 sba.InstructionBaseAddress =
77 (struct anv_address) { &device->instruction_block_pool.bo, 0 };
78 sba.InstructionMemoryObjectControlState = GENX(MOCS);
79 sba.InstructionBaseAddressModifyEnable = true;
80
81 # if (GEN_GEN >= 8)
82 /* Broadwell requires that we specify a buffer size for a bunch of
83 * these fields. However, since we will be growing the BOs live, we
84 * just set them all to the maximum.
85 */
86 sba.GeneralStateBufferSize = 0xfffff;
87 sba.GeneralStateBufferSizeModifyEnable = true;
88 sba.DynamicStateBufferSize = 0xfffff;
89 sba.DynamicStateBufferSizeModifyEnable = true;
90 sba.IndirectObjectBufferSize = 0xfffff;
91 sba.IndirectObjectBufferSizeModifyEnable = true;
92 sba.InstructionBufferSize = 0xfffff;
93 sba.InstructionBuffersizeModifyEnable = true;
94 # endif
95 }
96
97 /* After re-setting the surface state base address, we have to do some
98 * cache flushing so that the sampler engine will pick up the new
99 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
100 * Shared Function > 3D Sampler > State > State Caching (page 96):
101 *
102 * Coherency with system memory in the state cache, like the texture
103 * cache is handled partially by software. It is expected that the
104 * command stream or shader will issue Cache Flush operation or
105 * Cache_Flush sampler message to ensure that the L1 cache remains
106 * coherent with system memory.
107 *
108 * [...]
109 *
110 * Whenever the value of the Dynamic_State_Base_Addr,
111 * Surface_State_Base_Addr are altered, the L1 state cache must be
112 * invalidated to ensure the new surface or sampler state is fetched
113 * from system memory.
114 *
115 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
116 * which, according to the PIPE_CONTROL instruction documentation in the
117 * Broadwell PRM:
118 *
119 * Setting this bit is independent of any other bit in this packet.
120 * This bit controls the invalidation of the L1 and L2 state caches
121 * at the top of the pipe i.e. at the parsing time.
122 *
123 * Unfortunately, experimentation seems to indicate that state cache
124 * invalidation through a PIPE_CONTROL does nothing whatsoever in
125 * regards to surface state and binding tables. Instead, it seems that
126 * invalidating the texture cache is what is actually needed.
127 *
128 * XXX: As far as we have been able to determine through
129 * experimentation, flushing the texture cache appears to be
130 * sufficient. The theory here is that all of the sampling/rendering
131 * units cache the binding table in the texture cache. However, we have
132 * yet to be able to actually confirm this.
133 */
134 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
135 pc.TextureCacheInvalidationEnable = true;
136 }
137 }
138
139 void genX(CmdPipelineBarrier)(
140 VkCommandBuffer commandBuffer,
141 VkPipelineStageFlags srcStageMask,
142 VkPipelineStageFlags destStageMask,
143 VkBool32 byRegion,
144 uint32_t memoryBarrierCount,
145 const VkMemoryBarrier* pMemoryBarriers,
146 uint32_t bufferMemoryBarrierCount,
147 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
148 uint32_t imageMemoryBarrierCount,
149 const VkImageMemoryBarrier* pImageMemoryBarriers)
150 {
151 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
152 uint32_t b, *dw;
153
154 /* XXX: Right now, we're really dumb and just flush whatever categories
155 * the app asks for. One of these days we may make this a bit better
156 * but right now that's all the hardware allows for in most areas.
157 */
158 VkAccessFlags src_flags = 0;
159 VkAccessFlags dst_flags = 0;
160
161 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
162 src_flags |= pMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
167 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
169 }
170
171 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
172 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
173 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
174 }
175
176 /* Mask out the Source access flags we care about */
177 const uint32_t src_mask =
178 VK_ACCESS_SHADER_WRITE_BIT |
179 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
180 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
181 VK_ACCESS_TRANSFER_WRITE_BIT;
182
183 src_flags = src_flags & src_mask;
184
185 /* Mask out the destination access flags we care about */
186 const uint32_t dst_mask =
187 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
188 VK_ACCESS_INDEX_READ_BIT |
189 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
190 VK_ACCESS_UNIFORM_READ_BIT |
191 VK_ACCESS_SHADER_READ_BIT |
192 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
193 VK_ACCESS_TRANSFER_READ_BIT;
194
195 dst_flags = dst_flags & dst_mask;
196
197 /* The src flags represent how things were used previously. This is
198 * what we use for doing flushes.
199 */
200 struct GENX(PIPE_CONTROL) flush_cmd = {
201 GENX(PIPE_CONTROL_header),
202 .PostSyncOperation = NoWrite,
203 };
204
205 for_each_bit(b, src_flags) {
206 switch ((VkAccessFlagBits)(1 << b)) {
207 case VK_ACCESS_SHADER_WRITE_BIT:
208 flush_cmd.DCFlushEnable = true;
209 break;
210 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
211 flush_cmd.RenderTargetCacheFlushEnable = true;
212 break;
213 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
214 flush_cmd.DepthCacheFlushEnable = true;
215 break;
216 case VK_ACCESS_TRANSFER_WRITE_BIT:
217 flush_cmd.RenderTargetCacheFlushEnable = true;
218 flush_cmd.DepthCacheFlushEnable = true;
219 break;
220 default:
221 unreachable("should've masked this out by now");
222 }
223 }
224
225 /* If we end up doing two PIPE_CONTROLs, the first (flushing) one also has to
226 * stall and wait for the flushing to finish, so we don't re-dirty the
227 * caches with in-flight rendering after the second PIPE_CONTROL
228 * invalidates.
229 */
230
231 if (dst_flags)
232 flush_cmd.CommandStreamerStallEnable = true;
233
234 if (src_flags && dst_flags) {
235 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
236 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
237 }
238
239 /* The dst flags represent how things will be used in the future. This
240 * is what we use for doing cache invalidations.
241 */
242 struct GENX(PIPE_CONTROL) invalidate_cmd = {
243 GENX(PIPE_CONTROL_header),
244 .PostSyncOperation = NoWrite,
245 };
246
247 for_each_bit(b, dst_flags) {
248 switch ((VkAccessFlagBits)(1 << b)) {
249 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
250 case VK_ACCESS_INDEX_READ_BIT:
251 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
252 invalidate_cmd.VFCacheInvalidationEnable = true;
253 break;
254 case VK_ACCESS_UNIFORM_READ_BIT:
255 invalidate_cmd.ConstantCacheInvalidationEnable = true;
256 /* fallthrough */
257 case VK_ACCESS_SHADER_READ_BIT:
258 invalidate_cmd.TextureCacheInvalidationEnable = true;
259 break;
260 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
261 invalidate_cmd.TextureCacheInvalidationEnable = true;
262 break;
263 case VK_ACCESS_TRANSFER_READ_BIT:
264 invalidate_cmd.TextureCacheInvalidationEnable = true;
265 break;
266 default:
267 unreachable("should've masked this out by now");
268 }
269 }
270
271 if (dst_flags) {
272 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
273 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
274 }
275 }
276
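/* Emit 3DSTATE_CONSTANT_* for each graphics stage with dirty push constants
 * and return the mask of stages that were flushed.
 */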
277 static uint32_t
278 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
279 {
280 static const uint32_t push_constant_opcodes[] = {
281 [MESA_SHADER_VERTEX] = 21,
282 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
283 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
284 [MESA_SHADER_GEOMETRY] = 22,
285 [MESA_SHADER_FRAGMENT] = 23,
286 [MESA_SHADER_COMPUTE] = 0,
287 };
288
289 VkShaderStageFlags flushed = 0;
290
291 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
292 if (stage == MESA_SHADER_COMPUTE)
293 continue;
294
295 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
296
297 if (state.offset == 0) {
298 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
299 c._3DCommandSubOpcode = push_constant_opcodes[stage];
300 } else {
301 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
302 c._3DCommandSubOpcode = push_constant_opcodes[stage];
303 c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
304 #if GEN_GEN >= 9
305 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
306 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
307 #else
308 .PointerToConstantBuffer0 = { .offset = state.offset },
309 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
310 #endif
311 };
312 }
313 }
314
315 flushed |= mesa_to_vk_shader_stage(stage);
316 }
317
318 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
319
320 return flushed;
321 }
322
323 void
324 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
325 {
326 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
327 uint32_t *p;
328
329 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
330
331 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
332
333 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
334
335 genX(flush_pipeline_select_3d)(cmd_buffer);
336
337 if (vb_emit) {
338 const uint32_t num_buffers = __builtin_popcount(vb_emit);
339 const uint32_t num_dwords = 1 + num_buffers * 4;
340
341 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
342 GENX(3DSTATE_VERTEX_BUFFERS));
343 uint32_t vb, i = 0;
344 for_each_bit(vb, vb_emit) {
345 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
346 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
347
348 struct GENX(VERTEX_BUFFER_STATE) state = {
349 .VertexBufferIndex = vb,
350
351 #if GEN_GEN >= 8
352 .MemoryObjectControlState = GENX(MOCS),
353 #else
354 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
355 .InstanceDataStepRate = 1,
356 .VertexBufferMemoryObjectControlState = GENX(MOCS),
357 #endif
358
359 .AddressModifyEnable = true,
360 .BufferPitch = pipeline->binding_stride[vb],
361 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
362
363 #if GEN_GEN >= 8
364 .BufferSize = buffer->size - offset
365 #else
366 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
367 #endif
368 };
369
370 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
371 i++;
372 }
373 }
374
375 cmd_buffer->state.vb_dirty &= ~vb_emit;
376
377 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
378 /* If somebody compiled a pipeline after starting a command buffer, the
379 * scratch bo may have grown since we started this cmd buffer (and
380 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
381 * re-emit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
382 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
383 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
384
385 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
386
387 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
388 *
389 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
390 * the next 3DPRIMITIVE command after programming the
391 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
392 *
393 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
394 * pipeline setup, we need to dirty push constants.
395 */
396 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
397 }
398
399 #if GEN_GEN <= 7
400 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
401 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
402 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
403 *
404 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
405 * stall needs to be sent just prior to any 3DSTATE_VS,
406 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
407 * 3DSTATE_BINDING_TABLE_POINTER_VS,
408 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
409 * PIPE_CONTROL needs to be sent before any combination of VS
410 * associated 3DSTATE."
411 */
412 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
413 pc.DepthStallEnable = true;
414 pc.PostSyncOperation = WriteImmediateData;
415 pc.Address =
416 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
417 }
418 }
419 #endif
420
421 /* We emit the binding tables and sampler tables first, then emit push
422 * constants and then finally emit binding table and sampler table
423 * pointers. It has to happen in this order, since emitting the binding
424 * tables may change the push constants (in case of storage images). After
425 * emitting push constants, on SKL+ we have to emit the corresponding
426 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
427 */
428 uint32_t dirty = 0;
429 if (cmd_buffer->state.descriptors_dirty)
430 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
431
432 if (cmd_buffer->state.push_constants_dirty) {
433 #if GEN_GEN >= 9
434 /* On Skylake and later, the binding table pointer commands are
435 * what actually flush the changes to push constant state, so we need
436 * to dirty them to get them re-emitted below.
437 */
438 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
439 #else
440 cmd_buffer_flush_push_constants(cmd_buffer);
441 #endif
442 }
443
444 if (dirty)
445 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
446
447 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
448 gen8_cmd_buffer_emit_viewport(cmd_buffer);
449
450 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
451 gen7_cmd_buffer_emit_scissor(cmd_buffer);
452
453 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
454 }
455
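/* Bind a tiny vertex buffer at the reserved index 32 so the vertex shader can
 * read the base vertex and base instance values stored at the given offset.
 */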
456 static void
457 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
458 struct anv_bo *bo, uint32_t offset)
459 {
460 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
461 GENX(3DSTATE_VERTEX_BUFFERS));
462
463 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
464 &(struct GENX(VERTEX_BUFFER_STATE)) {
465 .VertexBufferIndex = 32, /* Reserved for this */
466 .AddressModifyEnable = true,
467 .BufferPitch = 0,
468 #if (GEN_GEN >= 8)
469 .MemoryObjectControlState = GENX(MOCS),
470 .BufferStartingAddress = { bo, offset },
471 .BufferSize = 8
472 #else
473 .VertexBufferMemoryObjectControlState = GENX(MOCS),
474 .BufferStartingAddress = { bo, offset },
475 .EndAddress = { bo, offset + 8 },
476 #endif
477 });
478 }
479
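/* Copy base_vertex/base_instance into dynamic state and bind them through
 * emit_base_vertex_instance_bo() above.
 */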
480 static void
481 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
482 uint32_t base_vertex, uint32_t base_instance)
483 {
484 struct anv_state id_state =
485 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
486
487 ((uint32_t *)id_state.map)[0] = base_vertex;
488 ((uint32_t *)id_state.map)[1] = base_instance;
489
490 if (!cmd_buffer->device->info.has_llc)
491 anv_state_clflush(id_state);
492
493 emit_base_vertex_instance_bo(cmd_buffer,
494 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
495 }
496
497 void genX(CmdDraw)(
498 VkCommandBuffer commandBuffer,
499 uint32_t vertexCount,
500 uint32_t instanceCount,
501 uint32_t firstVertex,
502 uint32_t firstInstance)
503 {
504 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
505 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
506 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
507
508 genX(cmd_buffer_flush_state)(cmd_buffer);
509
510 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
511 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
512
513 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
514 prim.VertexAccessType = SEQUENTIAL;
515 prim.PrimitiveTopologyType = pipeline->topology;
516 prim.VertexCountPerInstance = vertexCount;
517 prim.StartVertexLocation = firstVertex;
518 prim.InstanceCount = instanceCount;
519 prim.StartInstanceLocation = firstInstance;
520 prim.BaseVertexLocation = 0;
521 }
522 }
523
524 void genX(CmdDrawIndexed)(
525 VkCommandBuffer commandBuffer,
526 uint32_t indexCount,
527 uint32_t instanceCount,
528 uint32_t firstIndex,
529 int32_t vertexOffset,
530 uint32_t firstInstance)
531 {
532 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
533 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
534 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
535
536 genX(cmd_buffer_flush_state)(cmd_buffer);
537
538 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
539 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
540
541 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
542 prim.VertexAccessType = RANDOM;
543 prim.PrimitiveTopologyType = pipeline->topology;
544 prim.VertexCountPerInstance = indexCount;
545 prim.StartVertexLocation = firstIndex;
546 prim.InstanceCount = instanceCount;
547 prim.StartInstanceLocation = firstInstance;
548 prim.BaseVertexLocation = vertexOffset;
549 }
550 }
551
552 /* Auto-Draw / Indirect Registers */
553 #define GEN7_3DPRIM_END_OFFSET 0x2420
554 #define GEN7_3DPRIM_START_VERTEX 0x2430
555 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
556 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
557 #define GEN7_3DPRIM_START_INSTANCE 0x243C
558 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
559
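/* Small helpers for loading a 32-bit MMIO register either from a buffer
 * object (MI_LOAD_REGISTER_MEM) or from an immediate (MI_LOAD_REGISTER_IMM).
 */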
560 static void
561 emit_lrm(struct anv_batch *batch,
562 uint32_t reg, struct anv_bo *bo, uint32_t offset)
563 {
564 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
565 lrm.RegisterAddress = reg;
566 lrm.MemoryAddress = (struct anv_address) { bo, offset };
567 }
568 }
569
570 static void
571 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
572 {
573 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
574 lri.RegisterOffset = reg;
575 lri.DataDWord = imm;
576 }
577 }
578
579 void genX(CmdDrawIndirect)(
580 VkCommandBuffer commandBuffer,
581 VkBuffer _buffer,
582 VkDeviceSize offset,
583 uint32_t drawCount,
584 uint32_t stride)
585 {
586 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
587 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
588 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
589 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
590 struct anv_bo *bo = buffer->bo;
591 uint32_t bo_offset = buffer->offset + offset;
592
593 genX(cmd_buffer_flush_state)(cmd_buffer);
594
595 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
596 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
597
598 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
599 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
600 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
601 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
602 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
603
604 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
605 prim.IndirectParameterEnable = true;
606 prim.VertexAccessType = SEQUENTIAL;
607 prim.PrimitiveTopologyType = pipeline->topology;
608 }
609 }
610
611 void genX(CmdDrawIndexedIndirect)(
612 VkCommandBuffer commandBuffer,
613 VkBuffer _buffer,
614 VkDeviceSize offset,
615 uint32_t drawCount,
616 uint32_t stride)
617 {
618 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
619 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
620 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
621 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
622 struct anv_bo *bo = buffer->bo;
623 uint32_t bo_offset = buffer->offset + offset;
624
625 genX(cmd_buffer_flush_state)(cmd_buffer);
626
627 /* TODO: We need to stomp base vertex to 0 somehow */
628 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
629 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
630
631 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
632 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
633 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
634 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
635 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
636
637 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
638 prim.IndirectParameterEnable = true;
639 prim.VertexAccessType = RANDOM;
640 prim.PrimitiveTopologyType = pipeline->topology;
641 }
642 }
643
644 #if GEN_GEN == 7
645
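/* Check that the kernel's command parser is new enough for the given entry
 * point; return false (and log VK_ERROR_FEATURE_NOT_PRESENT) if it is not.
 */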
646 static bool
647 verify_cmd_parser(const struct anv_device *device,
648 int required_version,
649 const char *function)
650 {
651 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
652 vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
653 "cmd parser version %d is required for %s",
654 required_version, function);
655 return false;
656 } else {
657 return true;
658 }
659 }
660
661 #endif
662
663 void genX(CmdDispatch)(
664 VkCommandBuffer commandBuffer,
665 uint32_t x,
666 uint32_t y,
667 uint32_t z)
668 {
669 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
670 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
671 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
672
673 if (prog_data->uses_num_work_groups) {
674 struct anv_state state =
675 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
676 uint32_t *sizes = state.map;
677 sizes[0] = x;
678 sizes[1] = y;
679 sizes[2] = z;
680 if (!cmd_buffer->device->info.has_llc)
681 anv_state_clflush(state);
682 cmd_buffer->state.num_workgroups_offset = state.offset;
683 cmd_buffer->state.num_workgroups_bo =
684 &cmd_buffer->device->dynamic_state_block_pool.bo;
685 }
686
687 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
688
689 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
690 ggw.SIMDSize = prog_data->simd_size / 16;
691 ggw.ThreadDepthCounterMaximum = 0;
692 ggw.ThreadHeightCounterMaximum = 0;
693 ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1;
694 ggw.ThreadGroupIDXDimension = x;
695 ggw.ThreadGroupIDYDimension = y;
696 ggw.ThreadGroupIDZDimension = z;
697 ggw.RightExecutionMask = pipeline->cs_right_mask;
698 ggw.BottomExecutionMask = 0xffffffff;
699 }
700
701 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
702 }
703
704 #define GPGPU_DISPATCHDIMX 0x2500
705 #define GPGPU_DISPATCHDIMY 0x2504
706 #define GPGPU_DISPATCHDIMZ 0x2508
707
708 #define MI_PREDICATE_SRC0 0x2400
709 #define MI_PREDICATE_SRC1 0x2408
710
711 void genX(CmdDispatchIndirect)(
712 VkCommandBuffer commandBuffer,
713 VkBuffer _buffer,
714 VkDeviceSize offset)
715 {
716 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
717 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
718 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
719 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
720 struct anv_bo *bo = buffer->bo;
721 uint32_t bo_offset = buffer->offset + offset;
722 struct anv_batch *batch = &cmd_buffer->batch;
723
724 #if GEN_GEN == 7
725 /* Linux 4.4 added command parser version 5 which allows the GPGPU
726 * indirect dispatch registers to be written.
727 */
728 if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
729 return;
730 #endif
731
732 if (prog_data->uses_num_work_groups) {
733 cmd_buffer->state.num_workgroups_offset = bo_offset;
734 cmd_buffer->state.num_workgroups_bo = bo;
735 }
736
737 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
738
739 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
740 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
741 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
742
743 #if GEN_GEN <= 7
744 /* Clear the upper 32 bits of SRC0 and all 64 bits of SRC1 */
745 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
746 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
747 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
748
749 /* Load compute_dispatch_indirect_x_size into SRC0 */
750 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
751
752 /* predicate = (compute_dispatch_indirect_x_size == 0); */
753 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
754 mip.LoadOperation = LOAD_LOAD;
755 mip.CombineOperation = COMBINE_SET;
756 mip.CompareOperation = COMPARE_SRCS_EQUAL;
757 }
758
759 /* Load compute_dispatch_indirect_y_size into SRC0 */
760 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
761
762 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
763 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
764 mip.LoadOperation = LOAD_LOAD;
765 mip.CombineOperation = COMBINE_OR;
766 mip.CompareOperation = COMPARE_SRCS_EQUAL;
767 }
768
769 /* Load compute_dispatch_indirect_z_size into SRC0 */
770 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
771
772 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
773 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
774 mip.LoadOperation = LOAD_LOAD;
775 mip.CombineOperation = COMBINE_OR;
776 mip.CompareOperation = COMPARE_SRCS_EQUAL;
777 }
778
779 /* predicate = !predicate; */
780 #define COMPARE_FALSE 1
781 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
782 mip.LoadOperation = LOAD_LOADINV;
783 mip.CombineOperation = COMBINE_OR;
784 mip.CompareOperation = COMPARE_FALSE;
785 }
786 #endif
787
788 anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
789 ggw.IndirectParameterEnable = true;
790 ggw.PredicateEnable = GEN_GEN <= 7;
791 ggw.SIMDSize = prog_data->simd_size / 16;
792 ggw.ThreadDepthCounterMaximum = 0;
793 ggw.ThreadHeightCounterMaximum = 0;
794 ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1;
795 ggw.RightExecutionMask = pipeline->cs_right_mask;
796 ggw.BottomExecutionMask = 0xffffffff;
797 }
798
799 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
800 }
801
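/* Emit the flushes and invalidations the hardware requires before switching
 * pipeline modes with PIPELINE_SELECT.
 */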
802 static void
803 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
804 uint32_t pipeline)
805 {
806 #if GEN_GEN >= 8 && GEN_GEN < 10
807 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
808 *
809 * Software must clear the COLOR_CALC_STATE Valid field in
810 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
811 * with Pipeline Select set to GPGPU.
812 *
813 * The internal hardware docs recommend the same workaround for Gen9
814 * hardware too.
815 */
816 if (pipeline == GPGPU)
817 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
818 #elif GEN_GEN <= 7
819 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
820 * PIPELINE_SELECT [DevBWR+]":
821 *
822 * Project: DEVSNB+
823 *
824 * Software must ensure all the write caches are flushed through a
825 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
826 * command to invalidate read only caches prior to programming
827 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
828 */
829 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
830 pc.RenderTargetCacheFlushEnable = true;
831 pc.DepthCacheFlushEnable = true;
832 pc.DCFlushEnable = true;
833 pc.PostSyncOperation = NoWrite;
834 pc.CommandStreamerStallEnable = true;
835 }
836
837 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
838 pc.TextureCacheInvalidationEnable = true;
839 pc.ConstantCacheInvalidationEnable = true;
840 pc.StateCacheInvalidationEnable = true;
841 pc.InstructionCacheInvalidateEnable = true;
842 pc.PostSyncOperation = NoWrite;
843 }
844 #endif
845 }
846
847 void
848 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
849 {
850 if (cmd_buffer->state.current_pipeline != _3D) {
851 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
852
853 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
854 #if GEN_GEN >= 9
855 ps.MaskBits = 3;
856 #endif
857 ps.PipelineSelection = _3D;
858 }
859
860 cmd_buffer->state.current_pipeline = _3D;
861 }
862 }
863
864 void
865 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
866 {
867 if (cmd_buffer->state.current_pipeline != GPGPU) {
868 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
869
870 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
871 #if GEN_GEN >= 9
872 ps.MaskBits = 3;
873 #endif
874 ps.PipelineSelection = GPGPU;
875 }
876
877 cmd_buffer->state.current_pipeline = GPGPU;
878 }
879 }
880
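/* Allocate and fill a null RENDER_SURFACE_STATE sized to match the given
 * framebuffer.
 */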
881 struct anv_state
882 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
883 struct anv_framebuffer *fb)
884 {
885 struct anv_state state =
886 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
887
888 struct GENX(RENDER_SURFACE_STATE) null_ss = {
889 .SurfaceType = SURFTYPE_NULL,
890 .SurfaceArray = fb->layers > 0,
891 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
892 #if GEN_GEN >= 8
893 .TileMode = YMAJOR,
894 #else
895 .TiledSurface = true,
896 #endif
897 .Width = fb->width - 1,
898 .Height = fb->height - 1,
899 .Depth = fb->layers - 1,
900 .RenderTargetViewExtent = fb->layers - 1,
901 };
902
903 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
904
905 if (!cmd_buffer->device->info.has_llc)
906 anv_state_clflush(state);
907
908 return state;
909 }
910
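/* Emit 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER
 * and 3DSTATE_CLEAR_PARAMS for the current subpass, falling back to the null
 * depth buffer programming described below when no depth attachment exists.
 */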
911 static void
912 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
913 {
914 struct anv_device *device = cmd_buffer->device;
915 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
916 const struct anv_image_view *iview =
917 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
918 const struct anv_image *image = iview ? iview->image : NULL;
919 const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
920 const bool has_stencil =
921 image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
922
923 /* FIXME: Implement the PMA stall W/A */
924 /* FIXME: Width and Height are wrong */
925
926 /* Emit 3DSTATE_DEPTH_BUFFER */
927 if (has_depth) {
928 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
929 db.SurfaceType = SURFTYPE_2D;
930 db.DepthWriteEnable = true;
931 db.StencilWriteEnable = has_stencil;
932 db.HierarchicalDepthBufferEnable = false;
933
934 db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
935 &image->depth_surface.isl);
936
937 db.SurfaceBaseAddress = (struct anv_address) {
938 .bo = image->bo,
939 .offset = image->offset + image->depth_surface.offset,
940 };
941 db.DepthBufferObjectControlState = GENX(MOCS);
942
943 db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
944 db.Height = fb->height - 1;
945 db.Width = fb->width - 1;
946 db.LOD = 0;
947 db.Depth = 1 - 1;
948 db.MinimumArrayElement = 0;
949
950 #if GEN_GEN >= 8
951 db.SurfaceQPitch =
952 isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
953 #endif
954 db.RenderTargetViewExtent = 1 - 1;
955 }
956 } else {
957 /* Even when no depth buffer is present, the hardware requires that
958 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
959 *
960 * If a null depth buffer is bound, the driver must instead bind depth as:
961 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
962 * 3DSTATE_DEPTH.Width = 1
963 * 3DSTATE_DEPTH.Height = 1
964 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
965 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
966 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
967 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
968 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
969 *
970 * The PRM is wrong, though. The width and height must be programmed to
971 * the actual framebuffer's width and height, even when neither depth
972 * buffer nor stencil buffer is present. Also, D16_UNORM is not allowed
973 * to be combined with a stencil buffer, so we use D32_FLOAT instead.
974 */
975 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
976 db.SurfaceType = SURFTYPE_2D;
977 db.SurfaceFormat = D32_FLOAT;
978 db.Width = fb->width - 1;
979 db.Height = fb->height - 1;
980 db.StencilWriteEnable = has_stencil;
981 }
982 }
983
984 /* Emit 3DSTATE_STENCIL_BUFFER */
985 if (has_stencil) {
986 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
987 #if GEN_GEN >= 8 || GEN_IS_HASWELL
988 sb.StencilBufferEnable = true;
989 #endif
990 sb.StencilBufferObjectControlState = GENX(MOCS);
991
992 /* Stencil buffers have strange pitch. The PRM says:
993 *
994 * The pitch must be set to 2x the value computed based on width,
995 * as the stencil buffer is stored with two rows interleaved.
996 */
997 sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1;
998
999 #if GEN_GEN >= 8
1000 sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
1001 #endif
1002 sb.SurfaceBaseAddress = (struct anv_address) {
1003 .bo = image->bo,
1004 .offset = image->offset + image->stencil_surface.offset,
1005 };
1006 }
1007 } else {
1008 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
1009 }
1010
1011 /* Disable hierarchical depth buffers. */
1012 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
1013
1014 /* Clear the clear params. */
1015 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
1016 }
1017
1018 /**
1019 * @see anv_cmd_buffer_set_subpass()
1020 */
1021 void
1022 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1023 struct anv_subpass *subpass)
1024 {
1025 cmd_buffer->state.subpass = subpass;
1026
1027 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1028
1029 cmd_buffer_emit_depth_stencil(cmd_buffer);
1030 }
1031
1032 void genX(CmdBeginRenderPass)(
1033 VkCommandBuffer commandBuffer,
1034 const VkRenderPassBeginInfo* pRenderPassBegin,
1035 VkSubpassContents contents)
1036 {
1037 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1038 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1039 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1040
1041 cmd_buffer->state.framebuffer = framebuffer;
1042 cmd_buffer->state.pass = pass;
1043 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1044
1045 genX(flush_pipeline_select_3d)(cmd_buffer);
1046
1047 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1048
1049 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
1050 r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0);
1051 r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0);
1052 r.ClippedDrawingRectangleYMax =
1053 render_area->offset.y + render_area->extent.height - 1;
1054 r.ClippedDrawingRectangleXMax =
1055 render_area->offset.x + render_area->extent.width - 1;
1056 r.DrawingRectangleOriginY = 0;
1057 r.DrawingRectangleOriginX = 0;
1058 }
1059
1060 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1061 anv_cmd_buffer_clear_subpass(cmd_buffer);
1062 }
1063
1064 void genX(CmdNextSubpass)(
1065 VkCommandBuffer commandBuffer,
1066 VkSubpassContents contents)
1067 {
1068 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1069
1070 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1071
1072 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1073 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1074 anv_cmd_buffer_clear_subpass(cmd_buffer);
1075 }
1076
1077 void genX(CmdEndRenderPass)(
1078 VkCommandBuffer commandBuffer)
1079 {
1080 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1081
1082 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1083 }
1084
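/* Write the pixel pipeline's depth count to the given BO via a PIPE_CONTROL
 * post-sync operation; this is how occlusion query values are snapshotted.
 */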
1085 static void
1086 emit_ps_depth_count(struct anv_batch *batch,
1087 struct anv_bo *bo, uint32_t offset)
1088 {
1089 anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1090 pc.DestinationAddressType = DAT_PPGTT;
1091 pc.PostSyncOperation = WritePSDepthCount;
1092 pc.DepthStallEnable = true;
1093 pc.Address = (struct anv_address) { bo, offset };
1094 }
1095 }
1096
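/* Mark a query slot as available by writing a 1 through a PIPE_CONTROL
 * immediate-data post-sync write.
 */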
1097 static void
1098 emit_query_availability(struct anv_batch *batch,
1099 struct anv_bo *bo, uint32_t offset)
1100 {
1101 anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1102 pc.DestinationAddressType = DAT_PPGTT;
1103 pc.PostSyncOperation = WriteImmediateData;
1104 pc.Address = (struct anv_address) { bo, offset };
1105 pc.ImmediateData = 1;
1106 }
1107 }
1108
1109 void genX(CmdBeginQuery)(
1110 VkCommandBuffer commandBuffer,
1111 VkQueryPool queryPool,
1112 uint32_t query,
1113 VkQueryControlFlags flags)
1114 {
1115 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1116 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1117
1118 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1119 * that the pipelining of the depth write breaks. What we see is that
1120 * samples from the render pass clear leak into the first query
1121 * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
1122 * operation and DepthStallEnable seems to work around the issue.
1123 */
1124 if (cmd_buffer->state.need_query_wa) {
1125 cmd_buffer->state.need_query_wa = false;
1126 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1127 pc.DepthCacheFlushEnable = true;
1128 pc.DepthStallEnable = true;
1129 }
1130 }
1131
1132 switch (pool->type) {
1133 case VK_QUERY_TYPE_OCCLUSION:
1134 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1135 query * sizeof(struct anv_query_pool_slot));
1136 break;
1137
1138 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1139 default:
1140 unreachable("");
1141 }
1142 }
1143
1144 void genX(CmdEndQuery)(
1145 VkCommandBuffer commandBuffer,
1146 VkQueryPool queryPool,
1147 uint32_t query)
1148 {
1149 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1150 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1151
1152 switch (pool->type) {
1153 case VK_QUERY_TYPE_OCCLUSION:
1154 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1155 query * sizeof(struct anv_query_pool_slot) + 8);
1156
1157 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1158 query * sizeof(struct anv_query_pool_slot) + 16);
1159 break;
1160
1161 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1162 default:
1163 unreachable("");
1164 }
1165 }
1166
1167 #define TIMESTAMP 0x2358
1168
1169 void genX(CmdWriteTimestamp)(
1170 VkCommandBuffer commandBuffer,
1171 VkPipelineStageFlagBits pipelineStage,
1172 VkQueryPool queryPool,
1173 uint32_t query)
1174 {
1175 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1176 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1177 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1178
1179 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1180
1181 switch (pipelineStage) {
1182 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1183 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1184 srm.RegisterAddress = TIMESTAMP;
1185 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
1186 }
1187 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1188 srm.RegisterAddress = TIMESTAMP + 4;
1189 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
1190 }
1191 break;
1192
1193 default:
1194 /* Everything else is bottom-of-pipe */
1195 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1196 pc.DestinationAddressType = DAT_PPGTT;
1197 pc.PostSyncOperation = WriteTimestamp;
1198 pc.Address = (struct anv_address) { &pool->bo, offset };
1199 }
1200 break;
1201 }
1202
1203 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1204 }
1205
1206 #if GEN_GEN > 7 || GEN_IS_HASWELL
1207
1208 #define alu_opcode(v) __gen_uint((v), 20, 31)
1209 #define alu_operand1(v) __gen_uint((v), 10, 19)
1210 #define alu_operand2(v) __gen_uint((v), 0, 9)
1211 #define alu(opcode, operand1, operand2) \
1212 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1213
1214 #define OPCODE_NOOP 0x000
1215 #define OPCODE_LOAD 0x080
1216 #define OPCODE_LOADINV 0x480
1217 #define OPCODE_LOAD0 0x081
1218 #define OPCODE_LOAD1 0x481
1219 #define OPCODE_ADD 0x100
1220 #define OPCODE_SUB 0x101
1221 #define OPCODE_AND 0x102
1222 #define OPCODE_OR 0x103
1223 #define OPCODE_XOR 0x104
1224 #define OPCODE_STORE 0x180
1225 #define OPCODE_STOREINV 0x580
1226
1227 #define OPERAND_R0 0x00
1228 #define OPERAND_R1 0x01
1229 #define OPERAND_R2 0x02
1230 #define OPERAND_R3 0x03
1231 #define OPERAND_R4 0x04
1232 #define OPERAND_SRCA 0x20
1233 #define OPERAND_SRCB 0x21
1234 #define OPERAND_ACCU 0x31
1235 #define OPERAND_ZF 0x32
1236 #define OPERAND_CF 0x33
1237
1238 #define CS_GPR(n) (0x2600 + (n) * 8)
1239
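/* Load a 64-bit value from memory into a command streamer GPR using two
 * 32-bit MI_LOAD_REGISTER_MEM commands.
 */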
1240 static void
1241 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1242 struct anv_bo *bo, uint32_t offset)
1243 {
1244 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1245 lrm.RegisterAddress = reg;
1246 lrm.MemoryAddress = (struct anv_address) { bo, offset };
1247 }
1248 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1249 lrm.RegisterAddress = reg + 4;
1250 lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
1251 }
1252 }
1253
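/* Store a query result from a CS GPR to the destination buffer; the high
 * 32 bits are only written when 64-bit results were requested.
 */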
1254 static void
1255 store_query_result(struct anv_batch *batch, uint32_t reg,
1256 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1257 {
1258 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1259 srm.RegisterAddress = reg;
1260 srm.MemoryAddress = (struct anv_address) { bo, offset };
1261 }
1262
1263 if (flags & VK_QUERY_RESULT_64_BIT) {
1264 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1265 srm.RegisterAddress = reg + 4;
1266 srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
1267 }
1268 }
1269 }
1270
1271 void genX(CmdCopyQueryPoolResults)(
1272 VkCommandBuffer commandBuffer,
1273 VkQueryPool queryPool,
1274 uint32_t firstQuery,
1275 uint32_t queryCount,
1276 VkBuffer destBuffer,
1277 VkDeviceSize destOffset,
1278 VkDeviceSize destStride,
1279 VkQueryResultFlags flags)
1280 {
1281 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1282 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1283 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1284 uint32_t slot_offset, dst_offset;
1285
1286 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1287 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1288 pc.CommandStreamerStallEnable = true;
1289 pc.StallAtPixelScoreboard = true;
1290 }
1291 }
1292
1293 dst_offset = buffer->offset + destOffset;
1294 for (uint32_t i = 0; i < queryCount; i++) {
1295
1296 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1297 switch (pool->type) {
1298 case VK_QUERY_TYPE_OCCLUSION:
1299 emit_load_alu_reg_u64(&cmd_buffer->batch,
1300 CS_GPR(0), &pool->bo, slot_offset);
1301 emit_load_alu_reg_u64(&cmd_buffer->batch,
1302 CS_GPR(1), &pool->bo, slot_offset + 8);
1303
1304 /* FIXME: We need to clamp the result for 32-bit queries. */
1305
1306 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1307 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1308 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1309 dw[3] = alu(OPCODE_SUB, 0, 0);
1310 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1311 break;
1312
1313 case VK_QUERY_TYPE_TIMESTAMP:
1314 emit_load_alu_reg_u64(&cmd_buffer->batch,
1315 CS_GPR(2), &pool->bo, slot_offset);
1316 break;
1317
1318 default:
1319 unreachable("unhandled query type");
1320 }
1321
1322 store_query_result(&cmd_buffer->batch,
1323 CS_GPR(2), buffer->bo, dst_offset, flags);
1324
1325 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1326 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1327 &pool->bo, slot_offset + 16);
1328 if (flags & VK_QUERY_RESULT_64_BIT)
1329 store_query_result(&cmd_buffer->batch,
1330 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1331 else
1332 store_query_result(&cmd_buffer->batch,
1333 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1334 }
1335
1336 dst_offset += destStride;
1337 }
1338 }
1339
1340 #endif