932ba650e1a4431b13194c7481279940ea12c662
[mesa.git] src/intel/vulkan/genX_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
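/* Re-emit STATE_BASE_ADDRESS for this command buffer: general state points
 * at the scratch BO (if any), surface state at the command buffer's own
 * surface state base, and dynamic/instruction state at their block pools.
 * The PIPE_CONTROLs before and after handle the flush and invalidate
 * requirements described in the comments below.
 */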
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
53 pc.RenderTargetCacheFlushEnable = true;
54 }
55 #endif
56
57 anv_batch_emit_blk(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
58 sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
59 sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
60 sba.GeneralStateBaseAddressModifyEnable = true;
61
62 sba.SurfaceStateBaseAddress =
63 anv_cmd_buffer_surface_base_address(cmd_buffer);
64 sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
65 sba.SurfaceStateBaseAddressModifyEnable = true;
66
67 sba.DynamicStateBaseAddress =
68 (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
69 sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
70 sba.DynamicStateBaseAddressModifyEnable = true;
71
72 sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
73 sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
74 sba.IndirectObjectBaseAddressModifyEnable = true;
75
76 sba.InstructionBaseAddress =
77 (struct anv_address) { &device->instruction_block_pool.bo, 0 };
78 sba.InstructionMemoryObjectControlState = GENX(MOCS);
79 sba.InstructionBaseAddressModifyEnable = true;
80
81 # if (GEN_GEN >= 8)
82 /* Broadwell requires that we specify a buffer size for a bunch of
83 * these fields. However, since we will be growing the BOs live, we
84 * just set them all to the maximum.
85 */
86 sba.GeneralStateBufferSize = 0xfffff;
87 sba.GeneralStateBufferSizeModifyEnable = true;
88 sba.DynamicStateBufferSize = 0xfffff;
89 sba.DynamicStateBufferSizeModifyEnable = true;
90 sba.IndirectObjectBufferSize = 0xfffff;
91 sba.IndirectObjectBufferSizeModifyEnable = true;
92 sba.InstructionBufferSize = 0xfffff;
93 sba.InstructionBuffersizeModifyEnable = true;
94 # endif
95 }
96
97 /* After re-setting the surface state base address, we have to do some
98 * cache flushing so that the sampler engine will pick up the new
99 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
100 * Shared Function > 3D Sampler > State > State Caching (page 96):
101 *
102 * Coherency with system memory in the state cache, like the texture
103 * cache is handled partially by software. It is expected that the
104 * command stream or shader will issue Cache Flush operation or
105 * Cache_Flush sampler message to ensure that the L1 cache remains
106 * coherent with system memory.
107 *
108 * [...]
109 *
110 * Whenever the value of the Dynamic_State_Base_Addr,
111 * Surface_State_Base_Addr are altered, the L1 state cache must be
112 * invalidated to ensure the new surface or sampler state is fetched
113 * from system memory.
114 *
115 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
116 * which, according to the PIPE_CONTROL instruction documentation in the
117 * Broadwell PRM:
118 *
119 * Setting this bit is independent of any other bit in this packet.
120 * This bit controls the invalidation of the L1 and L2 state caches
121 * at the top of the pipe i.e. at the parsing time.
122 *
123 * Unfortunately, experimentation seems to indicate that state cache
124 * invalidation through a PIPE_CONTROL does nothing whatsoever with
125 * regard to surface state and binding tables. Instead, it seems that
126 * invalidating the texture cache is what is actually needed.
127 *
128 * XXX: As far as we have been able to determine through
129 * experimentation, flushing the texture cache appears to be
130 * sufficient. The theory here is that all of the sampling/rendering
131 * units cache the binding table in the texture cache. However, we have
132 * yet to be able to actually confirm this.
133 */
134 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
135 pc.TextureCacheInvalidationEnable = true;
136 }
137 }
138
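/* vkCmdPipelineBarrier: collapse every barrier in the call into two global
 * PIPE_CONTROLs, one that flushes the caches dirtied by the source access
 * flags and a second that invalidates the caches the destination access
 * flags will read through.
 */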
139 void genX(CmdPipelineBarrier)(
140 VkCommandBuffer commandBuffer,
141 VkPipelineStageFlags srcStageMask,
142 VkPipelineStageFlags destStageMask,
143 VkBool32 byRegion,
144 uint32_t memoryBarrierCount,
145 const VkMemoryBarrier* pMemoryBarriers,
146 uint32_t bufferMemoryBarrierCount,
147 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
148 uint32_t imageMemoryBarrierCount,
149 const VkImageMemoryBarrier* pImageMemoryBarriers)
150 {
151 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
152 uint32_t b, *dw;
153
154 /* XXX: Right now, we're really dumb and just flush whatever categories
155 * the app asks for. One of these days we may make this a bit better
156 * but right now that's all the hardware allows for in most areas.
157 */
158 VkAccessFlags src_flags = 0;
159 VkAccessFlags dst_flags = 0;
160
161 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
162 src_flags |= pMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
167 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
169 }
170
171 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
172 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
173 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
174 }
175
176 /* Keep only the source access flags we care about */
177 const uint32_t src_mask =
178 VK_ACCESS_SHADER_WRITE_BIT |
179 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
180 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
181 VK_ACCESS_TRANSFER_WRITE_BIT;
182
183 src_flags = src_flags & src_mask;
184
185 /* Keep only the destination access flags we care about */
186 const uint32_t dst_mask =
187 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
188 VK_ACCESS_INDEX_READ_BIT |
189 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
190 VK_ACCESS_UNIFORM_READ_BIT |
191 VK_ACCESS_SHADER_READ_BIT |
192 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
193 VK_ACCESS_TRANSFER_READ_BIT;
194
195 dst_flags = dst_flags & dst_mask;
196
197 /* The src flags represent how things were used previously. This is
198 * what we use for doing flushes.
199 */
200 struct GENX(PIPE_CONTROL) flush_cmd = {
201 GENX(PIPE_CONTROL_header),
202 .PostSyncOperation = NoWrite,
203 };
204
205 for_each_bit(b, src_flags) {
206 switch ((VkAccessFlagBits)(1 << b)) {
207 case VK_ACCESS_SHADER_WRITE_BIT:
208 flush_cmd.DCFlushEnable = true;
209 break;
210 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
211 flush_cmd.RenderTargetCacheFlushEnable = true;
212 break;
213 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
214 flush_cmd.DepthCacheFlushEnable = true;
215 break;
216 case VK_ACCESS_TRANSFER_WRITE_BIT:
217 flush_cmd.RenderTargetCacheFlushEnable = true;
218 flush_cmd.DepthCacheFlushEnable = true;
219 break;
220 default:
221 unreachable("should've masked this out by now");
222 }
223 }
224
225 /* If we end up doing two PIPE_CONTROLs, the first (flushing) one also has to
226 * stall and wait for the flushing to finish, so we don't re-dirty the
227 * caches with in-flight rendering after the second PIPE_CONTROL
228 * invalidates.
229 */
230
231 if (dst_flags)
232 flush_cmd.CommandStreamerStallEnable = true;
233
234 if (src_flags && dst_flags) {
235 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
236 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
237 }
238
239 /* The dst flags represent how things will be used in the future. This
240 * is what we use for doing cache invalidations.
241 */
242 struct GENX(PIPE_CONTROL) invalidate_cmd = {
243 GENX(PIPE_CONTROL_header),
244 .PostSyncOperation = NoWrite,
245 };
246
247 for_each_bit(b, dst_flags) {
248 switch ((VkAccessFlagBits)(1 << b)) {
249 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
250 case VK_ACCESS_INDEX_READ_BIT:
251 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
252 invalidate_cmd.VFCacheInvalidationEnable = true;
253 break;
254 case VK_ACCESS_UNIFORM_READ_BIT:
255 invalidate_cmd.ConstantCacheInvalidationEnable = true;
256 /* fallthrough */
257 case VK_ACCESS_SHADER_READ_BIT:
258 invalidate_cmd.TextureCacheInvalidationEnable = true;
259 break;
260 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
261 invalidate_cmd.TextureCacheInvalidationEnable = true;
262 break;
263 case VK_ACCESS_TRANSFER_READ_BIT:
264 invalidate_cmd.TextureCacheInvalidationEnable = true;
265 break;
266 default:
267 unreachable("should've masked this out by now");
268 }
269 }
270
271 if (dst_flags) {
272 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
273 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
274 }
275 }
276
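/* Emit 3DSTATE_CONSTANT_* for every graphics stage with dirty push
 * constants. The VS packet is reused for all stages by overriding its
 * _3DCommandSubOpcode with the per-stage opcode from the table below.
 * Returns the mask of stages that were flushed so that gen9+ callers can
 * re-emit the matching binding table pointers.
 */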
277 static uint32_t
278 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
279 {
280 static const uint32_t push_constant_opcodes[] = {
281 [MESA_SHADER_VERTEX] = 21,
282 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
283 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
284 [MESA_SHADER_GEOMETRY] = 22,
285 [MESA_SHADER_FRAGMENT] = 23,
286 [MESA_SHADER_COMPUTE] = 0,
287 };
288
289 VkShaderStageFlags flushed = 0;
290
291 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
292 if (stage == MESA_SHADER_COMPUTE)
293 continue;
294
295 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
296
297 if (state.offset == 0) {
298 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
299 ._3DCommandSubOpcode = push_constant_opcodes[stage]);
300 } else {
301 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
302 ._3DCommandSubOpcode = push_constant_opcodes[stage],
303 .ConstantBody = {
304 #if GEN_GEN >= 9
305 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
306 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
307 #else
308 .PointerToConstantBuffer0 = { .offset = state.offset },
309 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
310 #endif
311 });
312 }
313
314 flushed |= mesa_to_vk_shader_stage(stage);
315 }
316
317 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
318
319 return flushed;
320 }
321
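/* Flush all dirty 3D state before a draw: L3 configuration (gen8+ only for
 * now, see the FIXME below), pipeline select, dirty vertex buffers, the
 * pipeline batch itself, descriptor sets, push constants and finally
 * viewport, scissor and the remaining dynamic state.
 */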
322 void
323 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
324 {
325 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
326 uint32_t *p;
327
328 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
329
330 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
331
332 #if GEN_GEN >= 8
333 /* FIXME (jason): Currently, the config_l3 function causes problems on
334 * Haswell and prior if you have a kernel older than 4.4. In order to
335 * work, it requires a couple of registers be white-listed in the
336 * command parser and they weren't added until 4.4. What we should do
337 * is check the command parser version and make it a no-op if your
338 * command parser is either off or too old. Compute won't work 100%,
339 * but at least 3-D will. In the meantime, I'm going to make this
340 * gen8+ only so that we can get Haswell working again.
341 */
342 genX(cmd_buffer_config_l3)(cmd_buffer, false);
343 #endif
344
345 genX(flush_pipeline_select_3d)(cmd_buffer);
346
347 if (vb_emit) {
348 const uint32_t num_buffers = __builtin_popcount(vb_emit);
349 const uint32_t num_dwords = 1 + num_buffers * 4;
350
351 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
352 GENX(3DSTATE_VERTEX_BUFFERS));
353 uint32_t vb, i = 0;
354 for_each_bit(vb, vb_emit) {
355 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
356 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
357
358 struct GENX(VERTEX_BUFFER_STATE) state = {
359 .VertexBufferIndex = vb,
360
361 #if GEN_GEN >= 8
362 .MemoryObjectControlState = GENX(MOCS),
363 #else
364 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
365 .InstanceDataStepRate = 1,
366 .VertexBufferMemoryObjectControlState = GENX(MOCS),
367 #endif
368
369 .AddressModifyEnable = true,
370 .BufferPitch = pipeline->binding_stride[vb],
371 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
372
373 #if GEN_GEN >= 8
374 .BufferSize = buffer->size - offset
375 #else
376 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
377 #endif
378 };
379
380 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
381 i++;
382 }
383 }
384
385 cmd_buffer->state.vb_dirty &= ~vb_emit;
386
387 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
388 /* If somebody compiled a pipeline after starting a command buffer, the
389 * scratch bo may have grown since we started this cmd buffer (and
390 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
391 * re-emit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
392 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
393 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
394
395 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
396
397 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
398 *
399 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
400 * the next 3DPRIMITIVE command after programming the
401 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
402 *
403 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
404 * pipeline setup, we need to dirty push constants.
405 */
406 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
407 }
408
409 #if GEN_GEN <= 7
410 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
411 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
412 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
413 *
414 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
415 * stall needs to be sent just prior to any 3DSTATE_VS,
416 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
417 * 3DSTATE_BINDING_TABLE_POINTER_VS,
418 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
419 * PIPE_CONTROL needs to be sent before any combination of VS
420 * associated 3DSTATE."
421 */
422 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
423 pc.DepthStallEnable = true;
424 pc.PostSyncOperation = WriteImmediateData;
425 pc.Address =
426 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
427 }
428 }
429 #endif
430
431 /* We emit the binding tables and sampler tables first, then emit push
432 * constants and then finally emit binding table and sampler table
433 * pointers. It has to happen in this order, since emitting the binding
434 * tables may change the push constants (in case of storage images). After
435 * emitting push constants, on SKL+ we have to emit the corresponding
436 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
437 */
438 uint32_t dirty = 0;
439 if (cmd_buffer->state.descriptors_dirty)
440 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
441
442 if (cmd_buffer->state.push_constants_dirty) {
443 #if GEN_GEN >= 9
444 /* On Skylake and later, the binding table pointer commands are what
445 * actually flush the changes to push constant state, so we need
446 * to dirty them so they get re-emitted below.
447 */
448 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
449 #else
450 cmd_buffer_flush_push_constants(cmd_buffer);
451 #endif
452 }
453
454 if (dirty)
455 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
456
457 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
458 gen8_cmd_buffer_emit_viewport(cmd_buffer);
459
460 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
461 gen7_cmd_buffer_emit_scissor(cmd_buffer);
462
463 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
464 }
465
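/* Bind vertex buffer 32 (reserved for internal use) to an 8-byte region of
 * the given BO. The two dwords there hold the base vertex and base instance
 * values that the vertex shader can read as system values.
 */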
466 static void
467 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
468 struct anv_bo *bo, uint32_t offset)
469 {
470 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
471 GENX(3DSTATE_VERTEX_BUFFERS));
472
473 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
474 &(struct GENX(VERTEX_BUFFER_STATE)) {
475 .VertexBufferIndex = 32, /* Reserved for this */
476 .AddressModifyEnable = true,
477 .BufferPitch = 0,
478 #if (GEN_GEN >= 8)
479 .MemoryObjectControlState = GENX(MOCS),
480 .BufferStartingAddress = { bo, offset },
481 .BufferSize = 8
482 #else
483 .VertexBufferMemoryObjectControlState = GENX(MOCS),
484 .BufferStartingAddress = { bo, offset },
485 .EndAddress = { bo, offset + 8 },
486 #endif
487 });
488 }
489
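/* Store {base_vertex, base_instance} in freshly allocated dynamic state,
 * flush it on non-LLC platforms, and point vertex buffer 32 at it.
 */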
490 static void
491 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
492 uint32_t base_vertex, uint32_t base_instance)
493 {
494 struct anv_state id_state =
495 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
496
497 ((uint32_t *)id_state.map)[0] = base_vertex;
498 ((uint32_t *)id_state.map)[1] = base_instance;
499
500 if (!cmd_buffer->device->info.has_llc)
501 anv_state_clflush(id_state);
502
503 emit_base_vertex_instance_bo(cmd_buffer,
504 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
505 }
506
507 void genX(CmdDraw)(
508 VkCommandBuffer commandBuffer,
509 uint32_t vertexCount,
510 uint32_t instanceCount,
511 uint32_t firstVertex,
512 uint32_t firstInstance)
513 {
514 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
515 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
516 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
517
518 genX(cmd_buffer_flush_state)(cmd_buffer);
519
520 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
521 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
522
523 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
524 prim.VertexAccessType = SEQUENTIAL;
525 prim.PrimitiveTopologyType = pipeline->topology;
526 prim.VertexCountPerInstance = vertexCount;
527 prim.StartVertexLocation = firstVertex;
528 prim.InstanceCount = instanceCount;
529 prim.StartInstanceLocation = firstInstance;
530 prim.BaseVertexLocation = 0;
531 }
532 }
533
534 void genX(CmdDrawIndexed)(
535 VkCommandBuffer commandBuffer,
536 uint32_t indexCount,
537 uint32_t instanceCount,
538 uint32_t firstIndex,
539 int32_t vertexOffset,
540 uint32_t firstInstance)
541 {
542 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
543 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
544 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
545
546 genX(cmd_buffer_flush_state)(cmd_buffer);
547
548 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
549 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
550
551 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
552 prim.VertexAccessType = RANDOM;
553 prim.PrimitiveTopologyType = pipeline->topology;
554 prim.VertexCountPerInstance = indexCount;
555 prim.StartVertexLocation = firstIndex;
556 prim.InstanceCount = instanceCount;
557 prim.StartInstanceLocation = firstInstance;
558 prim.BaseVertexLocation = vertexOffset;
559 }
560 }
561
562 /* Auto-Draw / Indirect Registers */
563 #define GEN7_3DPRIM_END_OFFSET 0x2420
564 #define GEN7_3DPRIM_START_VERTEX 0x2430
565 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
566 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
567 #define GEN7_3DPRIM_START_INSTANCE 0x243C
568 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
569
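/* Small helpers for MI_LOAD_REGISTER_MEM / MI_LOAD_REGISTER_IMM, used below
 * to load the 3DPRIM and GPGPU indirect registers either from an indirect
 * buffer or from an immediate value.
 */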
570 static void
571 emit_lrm(struct anv_batch *batch,
572 uint32_t reg, struct anv_bo *bo, uint32_t offset)
573 {
574 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
575 .RegisterAddress = reg,
576 .MemoryAddress = { bo, offset });
577 }
578
579 static void
580 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
581 {
582 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
583 .RegisterOffset = reg,
584 .DataDWord = imm);
585 }
586
587 void genX(CmdDrawIndirect)(
588 VkCommandBuffer commandBuffer,
589 VkBuffer _buffer,
590 VkDeviceSize offset,
591 uint32_t drawCount,
592 uint32_t stride)
593 {
594 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
595 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
596 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
597 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
598 struct anv_bo *bo = buffer->bo;
599 uint32_t bo_offset = buffer->offset + offset;
600
601 genX(cmd_buffer_flush_state)(cmd_buffer);
602
603 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
604 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
605
606 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
607 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
608 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
609 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
610 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
611
612 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
613 prim.IndirectParameterEnable = true;
614 prim.VertexAccessType = SEQUENTIAL;
615 prim.PrimitiveTopologyType = pipeline->topology;
616 }
617 }
618
619 void genX(CmdDrawIndexedIndirect)(
620 VkCommandBuffer commandBuffer,
621 VkBuffer _buffer,
622 VkDeviceSize offset,
623 uint32_t drawCount,
624 uint32_t stride)
625 {
626 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
627 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
628 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
629 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
630 struct anv_bo *bo = buffer->bo;
631 uint32_t bo_offset = buffer->offset + offset;
632
633 genX(cmd_buffer_flush_state)(cmd_buffer);
634
635 /* TODO: We need to stomp base vertex to 0 somehow */
636 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
637 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
638
639 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
640 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
641 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
642 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
643 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
644
645 anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
646 prim.IndirectParameterEnable = true;
647 prim.VertexAccessType = RANDOM;
648 prim.PrimitiveTopologyType = pipeline->topology;
649 }
650 }
651
652 #if GEN_GEN == 7
653
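/* Some indirect commands require register writes that the kernel's command
 * parser only allows from a given version onwards. Report
 * VK_ERROR_FEATURE_NOT_PRESENT and bail if the parser is too old.
 */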
654 static bool
655 verify_cmd_parser(const struct anv_device *device,
656 int required_version,
657 const char *function)
658 {
659 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
660 vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
661 "cmd parser version %d is required for %s",
662 required_version, function);
663 return false;
664 } else {
665 return true;
666 }
667 }
668
669 #endif
670
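/* If the compute shader uses the number of workgroups (gl_NumWorkGroups),
 * write the dispatch dimensions into dynamic state and record where they
 * live; they are presumably bound as a buffer during descriptor setup.
 */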
671 void genX(CmdDispatch)(
672 VkCommandBuffer commandBuffer,
673 uint32_t x,
674 uint32_t y,
675 uint32_t z)
676 {
677 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
678 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
679 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
680
681 if (prog_data->uses_num_work_groups) {
682 struct anv_state state =
683 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
684 uint32_t *sizes = state.map;
685 sizes[0] = x;
686 sizes[1] = y;
687 sizes[2] = z;
688 if (!cmd_buffer->device->info.has_llc)
689 anv_state_clflush(state);
690 cmd_buffer->state.num_workgroups_offset = state.offset;
691 cmd_buffer->state.num_workgroups_bo =
692 &cmd_buffer->device->dynamic_state_block_pool.bo;
693 }
694
695 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
696
697 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
698 .SIMDSize = prog_data->simd_size / 16,
699 .ThreadDepthCounterMaximum = 0,
700 .ThreadHeightCounterMaximum = 0,
701 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
702 .ThreadGroupIDXDimension = x,
703 .ThreadGroupIDYDimension = y,
704 .ThreadGroupIDZDimension = z,
705 .RightExecutionMask = pipeline->cs_right_mask,
706 .BottomExecutionMask = 0xffffffff);
707
708 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
709 }
710
711 #define GPGPU_DISPATCHDIMX 0x2500
712 #define GPGPU_DISPATCHDIMY 0x2504
713 #define GPGPU_DISPATCHDIMZ 0x2508
714
715 #define MI_PREDICATE_SRC0 0x2400
716 #define MI_PREDICATE_SRC1 0x2408
717
718 void genX(CmdDispatchIndirect)(
719 VkCommandBuffer commandBuffer,
720 VkBuffer _buffer,
721 VkDeviceSize offset)
722 {
723 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
724 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
725 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
726 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
727 struct anv_bo *bo = buffer->bo;
728 uint32_t bo_offset = buffer->offset + offset;
729 struct anv_batch *batch = &cmd_buffer->batch;
730
731 #if GEN_GEN == 7
732 /* Linux 4.4 added command parser version 5 which allows the GPGPU
733 * indirect dispatch registers to be written.
734 */
735 if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
736 return;
737 #endif
738
739 if (prog_data->uses_num_work_groups) {
740 cmd_buffer->state.num_workgroups_offset = bo_offset;
741 cmd_buffer->state.num_workgroups_bo = bo;
742 }
743
744 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
745
746 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
747 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
748 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
749
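/* On gen7, predicate the walker below so that, as the step-by-step comments
 * spell out, it is skipped whenever any of the indirect dispatch dimensions
 * is zero: the MI_PREDICATE sequence computes
 * predicate = !(x == 0 || y == 0 || z == 0).
 */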
750 #if GEN_GEN <= 7
751 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
752 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
753 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
754 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
755
756 /* Load compute_dispatch_indirect_x_size into SRC0 */
757 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
758
759 /* predicate = (compute_dispatch_indirect_x_size == 0); */
760 anv_batch_emit(batch, GENX(MI_PREDICATE),
761 .LoadOperation = LOAD_LOAD,
762 .CombineOperation = COMBINE_SET,
763 .CompareOperation = COMPARE_SRCS_EQUAL);
764
765 /* Load compute_dispatch_indirect_y_size into SRC0 */
766 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
767
768 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
769 anv_batch_emit(batch, GENX(MI_PREDICATE),
770 .LoadOperation = LOAD_LOAD,
771 .CombineOperation = COMBINE_OR,
772 .CompareOperation = COMPARE_SRCS_EQUAL);
773
774 /* Load compute_dispatch_indirect_z_size into SRC0 */
775 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
776
777 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
778 anv_batch_emit(batch, GENX(MI_PREDICATE),
779 .LoadOperation = LOAD_LOAD,
780 .CombineOperation = COMBINE_OR,
781 .CompareOperation = COMPARE_SRCS_EQUAL);
782
783 /* predicate = !predicate; */
784 #define COMPARE_FALSE 1
785 anv_batch_emit(batch, GENX(MI_PREDICATE),
786 .LoadOperation = LOAD_LOADINV,
787 .CombineOperation = COMBINE_OR,
788 .CompareOperation = COMPARE_FALSE);
789 #endif
790
791 anv_batch_emit(batch, GENX(GPGPU_WALKER),
792 .IndirectParameterEnable = true,
793 .PredicateEnable = GEN_GEN <= 7,
794 .SIMDSize = prog_data->simd_size / 16,
795 .ThreadDepthCounterMaximum = 0,
796 .ThreadHeightCounterMaximum = 0,
797 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
798 .RightExecutionMask = pipeline->cs_right_mask,
799 .BottomExecutionMask = 0xffffffff);
800
801 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
802 }
803
804 static void
805 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
806 uint32_t pipeline)
807 {
808 #if GEN_GEN >= 8 && GEN_GEN < 10
809 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
810 *
811 * Software must clear the COLOR_CALC_STATE Valid field in
812 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
813 * with Pipeline Select set to GPGPU.
814 *
815 * The internal hardware docs recommend the same workaround for Gen9
816 * hardware too.
817 */
818 if (pipeline == GPGPU)
819 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS));
820 #elif GEN_GEN <= 7
821 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
822 * PIPELINE_SELECT [DevBWR+]":
823 *
824 * Project: DEVSNB+
825 *
826 * Software must ensure all the write caches are flushed through a
827 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
828 * command to invalidate read only caches prior to programming
829 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
830 */
831 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
832 pc.RenderTargetCacheFlushEnable = true;
833 pc.DepthCacheFlushEnable = true;
834 pc.DCFlushEnable = true;
835 pc.PostSyncOperation = NoWrite;
836 pc.CommandStreamerStallEnable = true;
837 }
838
839 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
840 pc.TextureCacheInvalidationEnable = true;
841 pc.ConstantCacheInvalidationEnable = true;
842 pc.StateCacheInvalidationEnable = true;
843 pc.InstructionCacheInvalidateEnable = true;
844 pc.PostSyncOperation = NoWrite;
845 }
846 #endif
847 }
848
849 void
850 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
851 {
852 if (cmd_buffer->state.current_pipeline != _3D) {
853 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
854
855 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
856 #if GEN_GEN >= 9
857 .MaskBits = 3,
858 #endif
859 .PipelineSelection = _3D);
860 cmd_buffer->state.current_pipeline = _3D;
861 }
862 }
863
864 void
865 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
866 {
867 if (cmd_buffer->state.current_pipeline != GPGPU) {
868 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
869
870 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
871 #if GEN_GEN >= 9
872 .MaskBits = 3,
873 #endif
874 .PipelineSelection = GPGPU);
875 cmd_buffer->state.current_pipeline = GPGPU;
876 }
877 }
878
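/* Allocate a surface state and fill it with a SURFTYPE_NULL
 * RENDER_SURFACE_STATE matching the framebuffer dimensions, clflushing it
 * on non-LLC platforms since the GPU reads it directly.
 */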
879 struct anv_state
880 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
881 struct anv_framebuffer *fb)
882 {
883 struct anv_state state =
884 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
885
886 struct GENX(RENDER_SURFACE_STATE) null_ss = {
887 .SurfaceType = SURFTYPE_NULL,
888 .SurfaceArray = fb->layers > 0,
889 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
890 #if GEN_GEN >= 8
891 .TileMode = YMAJOR,
892 #else
893 .TiledSurface = true,
894 #endif
895 .Width = fb->width - 1,
896 .Height = fb->height - 1,
897 .Depth = fb->layers - 1,
898 .RenderTargetViewExtent = fb->layers - 1,
899 };
900
901 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
902
903 if (!cmd_buffer->device->info.has_llc)
904 anv_state_clflush(state);
905
906 return state;
907 }
908
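/* Emit 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS for the current
 * subpass's depth/stencil attachment, including the null depth buffer
 * programming required when no such attachment exists.
 */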
909 static void
910 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
911 {
912 struct anv_device *device = cmd_buffer->device;
913 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
914 const struct anv_image_view *iview =
915 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
916 const struct anv_image *image = iview ? iview->image : NULL;
917 const struct anv_format *anv_format =
918 iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
919 const bool has_depth = iview && anv_format->has_depth;
920 const bool has_stencil = iview && anv_format->has_stencil;
921
922 /* FIXME: Implement the PMA stall W/A */
923 /* FIXME: Width and Height are wrong */
924
925 /* Emit 3DSTATE_DEPTH_BUFFER */
926 if (has_depth) {
927 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
928 .SurfaceType = SURFTYPE_2D,
929 .DepthWriteEnable = true,
930 .StencilWriteEnable = has_stencil,
931 .HierarchicalDepthBufferEnable = false,
932 .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
933 &image->depth_surface.isl),
934 .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
935 .SurfaceBaseAddress = {
936 .bo = image->bo,
937 .offset = image->offset + image->depth_surface.offset,
938 },
939 .Height = fb->height - 1,
940 .Width = fb->width - 1,
941 .LOD = 0,
942 .Depth = 1 - 1,
943 .MinimumArrayElement = 0,
944 .DepthBufferObjectControlState = GENX(MOCS),
945 #if GEN_GEN >= 8
946 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
947 #endif
948 .RenderTargetViewExtent = 1 - 1);
949 } else {
950 /* Even when no depth buffer is present, the hardware requires that
951 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
952 *
953 * If a null depth buffer is bound, the driver must instead bind depth as:
954 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
955 * 3DSTATE_DEPTH.Width = 1
956 * 3DSTATE_DEPTH.Height = 1
957 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
958 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
959 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
960 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
961 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
962 *
963 * The PRM is wrong, though. The width and height must be programmed to
964 * the actual framebuffer's width and height, even when neither depth buffer
965 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
966 * be combined with a stencil buffer so we use D32_FLOAT instead.
967 */
968 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
969 .SurfaceType = SURFTYPE_2D,
970 .SurfaceFormat = D32_FLOAT,
971 .Width = fb->width - 1,
972 .Height = fb->height - 1,
973 .StencilWriteEnable = has_stencil);
974 }
975
976 /* Emit 3DSTATE_STENCIL_BUFFER */
977 if (has_stencil) {
978 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
979 #if GEN_GEN >= 8 || GEN_IS_HASWELL
980 .StencilBufferEnable = true,
981 #endif
982 .StencilBufferObjectControlState = GENX(MOCS),
983
984 /* Stencil buffers have strange pitch. The PRM says:
985 *
986 * The pitch must be set to 2x the value computed based on width,
987 * as the stencil buffer is stored with two rows interleaved.
988 */
989 .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
990
991 #if GEN_GEN >= 8
992 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
993 #endif
994 .SurfaceBaseAddress = {
995 .bo = image->bo,
996 .offset = image->offset + image->stencil_surface.offset,
997 });
998 } else {
999 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
1000 }
1001
1002 /* Disable hierarchical depth buffers. */
1003 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
1004
1005 /* Clear the clear params. */
1006 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
1007 }
1008
1009 /**
1010 * @see anv_cmd_buffer_set_subpass()
1011 */
1012 void
1013 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1014 struct anv_subpass *subpass)
1015 {
1016 cmd_buffer->state.subpass = subpass;
1017
1018 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1019
1020 cmd_buffer_emit_depth_stencil(cmd_buffer);
1021 }
1022
1023 void genX(CmdBeginRenderPass)(
1024 VkCommandBuffer commandBuffer,
1025 const VkRenderPassBeginInfo* pRenderPassBegin,
1026 VkSubpassContents contents)
1027 {
1028 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1029 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1030 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1031
1032 cmd_buffer->state.framebuffer = framebuffer;
1033 cmd_buffer->state.pass = pass;
1034 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1035
1036 genX(flush_pipeline_select_3d)(cmd_buffer);
1037
1038 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1039
1040 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
1041 .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
1042 .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
1043 .ClippedDrawingRectangleYMax =
1044 render_area->offset.y + render_area->extent.height - 1,
1045 .ClippedDrawingRectangleXMax =
1046 render_area->offset.x + render_area->extent.width - 1,
1047 .DrawingRectangleOriginY = 0,
1048 .DrawingRectangleOriginX = 0);
1049
1050 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1051 anv_cmd_buffer_clear_subpass(cmd_buffer);
1052 }
1053
1054 void genX(CmdNextSubpass)(
1055 VkCommandBuffer commandBuffer,
1056 VkSubpassContents contents)
1057 {
1058 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1059
1060 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1061
1062 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1063 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1064 anv_cmd_buffer_clear_subpass(cmd_buffer);
1065 }
1066
1067 void genX(CmdEndRenderPass)(
1068 VkCommandBuffer commandBuffer)
1069 {
1070 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1071
1072 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1073 }
1074
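/* Write PS_DEPTH_COUNT to the given BO offset with a depth-stalling
 * PIPE_CONTROL. Occlusion query results are later computed as the
 * difference of two such writes (see CmdCopyQueryPoolResults).
 */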
1075 static void
1076 emit_ps_depth_count(struct anv_batch *batch,
1077 struct anv_bo *bo, uint32_t offset)
1078 {
1079 anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
1080 pc.DestinationAddressType = DAT_PPGTT;
1081 pc.PostSyncOperation = WritePSDepthCount;
1082 pc.DepthStallEnable = true;
1083 pc.Address = (struct anv_address) { bo, offset };
1084 }
1085 }
1086
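/* Mark a query slot available by writing an immediate 1 to its
 * availability dword once the preceding work has completed.
 */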
1087 static void
1088 emit_query_availability(struct anv_batch *batch,
1089 struct anv_bo *bo, uint32_t offset)
1090 {
1091 anv_batch_emit_blk(batch, GENX(PIPE_CONTROL), pc) {
1092 pc.DestinationAddressType = DAT_PPGTT;
1093 pc.PostSyncOperation = WriteImmediateData;
1094 pc.Address = (struct anv_address) { bo, offset };
1095 pc.ImmediateData = 1;
1096 }
1097 }
1098
1099 void genX(CmdBeginQuery)(
1100 VkCommandBuffer commandBuffer,
1101 VkQueryPool queryPool,
1102 uint32_t query,
1103 VkQueryControlFlags flags)
1104 {
1105 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1106 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1107
1108 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1109 * that the pipelining of the depth write breaks. What we see is that
1110 * samples from the render pass clear leak into the first query
1111 * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
1112 * operation and DepthStallEnable seems to work around the issue.
1113 */
1114 if (cmd_buffer->state.need_query_wa) {
1115 cmd_buffer->state.need_query_wa = false;
1116 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1117 pc.DepthCacheFlushEnable = true;
1118 pc.DepthStallEnable = true;
1119 }
1120 }
1121
1122 switch (pool->type) {
1123 case VK_QUERY_TYPE_OCCLUSION:
1124 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1125 query * sizeof(struct anv_query_pool_slot));
1126 break;
1127
1128 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1129 default:
1130 unreachable("");
1131 }
1132 }
1133
1134 void genX(CmdEndQuery)(
1135 VkCommandBuffer commandBuffer,
1136 VkQueryPool queryPool,
1137 uint32_t query)
1138 {
1139 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1140 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1141
1142 switch (pool->type) {
1143 case VK_QUERY_TYPE_OCCLUSION:
1144 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1145 query * sizeof(struct anv_query_pool_slot) + 8);
1146
1147 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1148 query * sizeof(struct anv_query_pool_slot) + 16);
1149 break;
1150
1151 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1152 default:
1153 unreachable("");
1154 }
1155 }
1156
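/* TIMESTAMP is the command streamer's timestamp register. Top-of-pipe
 * timestamps read it directly with MI_STORE_REGISTER_MEM; everything else
 * uses a PIPE_CONTROL post-sync timestamp write.
 */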
1157 #define TIMESTAMP 0x2358
1158
1159 void genX(CmdWriteTimestamp)(
1160 VkCommandBuffer commandBuffer,
1161 VkPipelineStageFlagBits pipelineStage,
1162 VkQueryPool queryPool,
1163 uint32_t query)
1164 {
1165 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1166 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1167 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1168
1169 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1170
1171 switch (pipelineStage) {
1172 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1173 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1174 .RegisterAddress = TIMESTAMP,
1175 .MemoryAddress = { &pool->bo, offset });
1176 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
1177 .RegisterAddress = TIMESTAMP + 4,
1178 .MemoryAddress = { &pool->bo, offset + 4 });
1179 break;
1180
1181 default:
1182 /* Everything else is bottom-of-pipe */
1183 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
1184 .DestinationAddressType = DAT_PPGTT,
1185 .PostSyncOperation = WriteTimestamp,
1186 .Address = { &pool->bo, offset });
1187 break;
1188 }
1189
1190 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1191 }
1192
1193 #if GEN_GEN > 7 || GEN_IS_HASWELL
1194
1195 #define alu_opcode(v) __gen_uint((v), 20, 31)
1196 #define alu_operand1(v) __gen_uint((v), 10, 19)
1197 #define alu_operand2(v) __gen_uint((v), 0, 9)
1198 #define alu(opcode, operand1, operand2) \
1199 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1200
1201 #define OPCODE_NOOP 0x000
1202 #define OPCODE_LOAD 0x080
1203 #define OPCODE_LOADINV 0x480
1204 #define OPCODE_LOAD0 0x081
1205 #define OPCODE_LOAD1 0x481
1206 #define OPCODE_ADD 0x100
1207 #define OPCODE_SUB 0x101
1208 #define OPCODE_AND 0x102
1209 #define OPCODE_OR 0x103
1210 #define OPCODE_XOR 0x104
1211 #define OPCODE_STORE 0x180
1212 #define OPCODE_STOREINV 0x580
1213
1214 #define OPERAND_R0 0x00
1215 #define OPERAND_R1 0x01
1216 #define OPERAND_R2 0x02
1217 #define OPERAND_R3 0x03
1218 #define OPERAND_R4 0x04
1219 #define OPERAND_SRCA 0x20
1220 #define OPERAND_SRCB 0x21
1221 #define OPERAND_ACCU 0x31
1222 #define OPERAND_ZF 0x32
1223 #define OPERAND_CF 0x33
1224
1225 #define CS_GPR(n) (0x2600 + (n) * 8)
1226
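/* Load a 64-bit value from a BO into a command streamer GPR as two 32-bit
 * MI_LOAD_REGISTER_MEMs.
 */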
1227 static void
1228 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1229 struct anv_bo *bo, uint32_t offset)
1230 {
1231 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1232 .RegisterAddress = reg,
1233 .MemoryAddress = { bo, offset });
1234 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
1235 .RegisterAddress = reg + 4,
1236 .MemoryAddress = { bo, offset + 4 });
1237 }
1238
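/* Store a GPR to the destination buffer; only the low dword is written
 * unless 64-bit results were requested.
 */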
1239 static void
1240 store_query_result(struct anv_batch *batch, uint32_t reg,
1241 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1242 {
1243 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1244 .RegisterAddress = reg,
1245 .MemoryAddress = { bo, offset });
1246
1247 if (flags & VK_QUERY_RESULT_64_BIT)
1248 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
1249 .RegisterAddress = reg + 4,
1250 .MemoryAddress = { bo, offset + 4 });
1251 }
1252
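/* Resolve query results on the GPU: occlusion results are computed as
 * end - begin with MI_MATH into GPR2, timestamps are loaded directly, and
 * the result (plus availability, if requested) is stored to the destination
 * buffer with MI_STORE_REGISTER_MEM.
 */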
1253 void genX(CmdCopyQueryPoolResults)(
1254 VkCommandBuffer commandBuffer,
1255 VkQueryPool queryPool,
1256 uint32_t firstQuery,
1257 uint32_t queryCount,
1258 VkBuffer destBuffer,
1259 VkDeviceSize destOffset,
1260 VkDeviceSize destStride,
1261 VkQueryResultFlags flags)
1262 {
1263 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1264 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1265 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1266 uint32_t slot_offset, dst_offset;
1267
1268 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1269 anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1270 pc.CommandStreamerStallEnable = true;
1271 pc.StallAtPixelScoreboard = true;
1272 }
1273 }
1274
1275 dst_offset = buffer->offset + destOffset;
1276 for (uint32_t i = 0; i < queryCount; i++) {
1277
1278 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1279 switch (pool->type) {
1280 case VK_QUERY_TYPE_OCCLUSION:
1281 emit_load_alu_reg_u64(&cmd_buffer->batch,
1282 CS_GPR(0), &pool->bo, slot_offset);
1283 emit_load_alu_reg_u64(&cmd_buffer->batch,
1284 CS_GPR(1), &pool->bo, slot_offset + 8);
1285
1286 /* FIXME: We need to clamp the result for 32 bit. */
1287
1288 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1289 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1290 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1291 dw[3] = alu(OPCODE_SUB, 0, 0);
1292 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1293 break;
1294
1295 case VK_QUERY_TYPE_TIMESTAMP:
1296 emit_load_alu_reg_u64(&cmd_buffer->batch,
1297 CS_GPR(2), &pool->bo, slot_offset);
1298 break;
1299
1300 default:
1301 unreachable("unhandled query type");
1302 }
1303
1304 store_query_result(&cmd_buffer->batch,
1305 CS_GPR(2), buffer->bo, dst_offset, flags);
1306
1307 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1308 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1309 &pool->bo, slot_offset + 16);
1310 if (flags & VK_QUERY_RESULT_64_BIT)
1311 store_query_result(&cmd_buffer->batch,
1312 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1313 else
1314 store_query_result(&cmd_buffer->batch,
1315 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1316 }
1317
1318 dst_offset += destStride;
1319 }
1320 }
1321
1322 #endif