anv: Store prog data in pipeline cache stream
[mesa.git] src/intel/vulkan/genX_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
37
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
42
43 /* XXX: Do we need this on more than just BDW? */
44 #if (GEN_GEN >= 8)
45 /* Emit a render target cache flush.
46 *
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
51 */
52 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
53 .RenderTargetCacheFlushEnable = true);
54 #endif
55
56 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
57 .GeneralStateBaseAddress = { scratch_bo, 0 },
58 .GeneralStateMemoryObjectControlState = GENX(MOCS),
59 .GeneralStateBaseAddressModifyEnable = true,
60
61 .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
62 .SurfaceStateMemoryObjectControlState = GENX(MOCS),
63 .SurfaceStateBaseAddressModifyEnable = true,
64
65 .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
66 .DynamicStateMemoryObjectControlState = GENX(MOCS),
67 .DynamicStateBaseAddressModifyEnable = true,
68
69 .IndirectObjectBaseAddress = { NULL, 0 },
70 .IndirectObjectMemoryObjectControlState = GENX(MOCS),
71 .IndirectObjectBaseAddressModifyEnable = true,
72
73 .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
74 .InstructionMemoryObjectControlState = GENX(MOCS),
75 .InstructionBaseAddressModifyEnable = true,
76
77 # if (GEN_GEN >= 8)
78 /* Broadwell requires that we specify a buffer size for a bunch of
79 * these fields. However, since we will be growing the BOs live, we
80 * just set them all to the maximum.
81 */
82 .GeneralStateBufferSize = 0xfffff,
83 .GeneralStateBufferSizeModifyEnable = true,
84 .DynamicStateBufferSize = 0xfffff,
85 .DynamicStateBufferSizeModifyEnable = true,
86 .IndirectObjectBufferSize = 0xfffff,
87 .IndirectObjectBufferSizeModifyEnable = true,
88 .InstructionBufferSize = 0xfffff,
89 .InstructionBuffersizeModifyEnable = true,
90 # endif
91 );
92
93 /* After re-setting the surface state base address, we have to do some
94 * cache flushing so that the sampler engine will pick up the new
95 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
96 * Shared Function > 3D Sampler > State > State Caching (page 96):
97 *
98 * Coherency with system memory in the state cache, like the texture
99 * cache is handled partially by software. It is expected that the
100 * command stream or shader will issue Cache Flush operation or
101 * Cache_Flush sampler message to ensure that the L1 cache remains
102 * coherent with system memory.
103 *
104 * [...]
105 *
106 * Whenever the value of the Dynamic_State_Base_Addr,
107 * Surface_State_Base_Addr are altered, the L1 state cache must be
108 * invalidated to ensure the new surface or sampler state is fetched
109 * from system memory.
110 *
111 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
112 * which, according to the PIPE_CONTROL instruction documentation in the
113 * Broadwell PRM:
114 *
115 * Setting this bit is independent of any other bit in this packet.
116 * This bit controls the invalidation of the L1 and L2 state caches
117 * at the top of the pipe i.e. at the parsing time.
118 *
119 * Unfortunately, experimentation seems to indicate that state cache
120 * invalidation through a PIPE_CONTROL does nothing whatsoever in
121 * regards to surface state and binding tables. Instead, it seems that
122 * invalidating the texture cache is what is actually needed.
123 *
124 * XXX: As far as we have been able to determine through experimentation,
125 * invalidating the texture cache appears to be sufficient. The theory
126 * here is that all of the sampling/rendering
127 * units cache the binding table in the texture cache. However, we have
128 * yet to be able to actually confirm this.
129 */
130 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
131 .TextureCacheInvalidationEnable = true);
132 }
133
134 void genX(CmdPipelineBarrier)(
135 VkCommandBuffer commandBuffer,
136 VkPipelineStageFlags srcStageMask,
137 VkPipelineStageFlags destStageMask,
138 VkBool32 byRegion,
139 uint32_t memoryBarrierCount,
140 const VkMemoryBarrier* pMemoryBarriers,
141 uint32_t bufferMemoryBarrierCount,
142 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
143 uint32_t imageMemoryBarrierCount,
144 const VkImageMemoryBarrier* pImageMemoryBarriers)
145 {
146 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
147 uint32_t b, *dw;
148
149 /* XXX: Right now, we're really dumb and just flush whatever categories
150 * the app asks for. One of these days we may make this a bit better
151 * but right now that's all the hardware allows for in most areas.
152 */
153 VkAccessFlags src_flags = 0;
154 VkAccessFlags dst_flags = 0;
155
156 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
157 src_flags |= pMemoryBarriers[i].srcAccessMask;
158 dst_flags |= pMemoryBarriers[i].dstAccessMask;
159 }
160
161 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
162 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
164 }
165
166 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
167 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
169 }
170
171 /* Mask out the Source access flags we care about */
172 const uint32_t src_mask =
173 VK_ACCESS_SHADER_WRITE_BIT |
174 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
175 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
176 VK_ACCESS_TRANSFER_WRITE_BIT;
177
178 src_flags = src_flags & src_mask;
179
180 /* Mask out the destination access flags we care about */
181 const uint32_t dst_mask =
182 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
183 VK_ACCESS_INDEX_READ_BIT |
184 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
185 VK_ACCESS_UNIFORM_READ_BIT |
186 VK_ACCESS_SHADER_READ_BIT |
187 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
188 VK_ACCESS_TRANSFER_READ_BIT;
189
190 dst_flags = dst_flags & dst_mask;
191
192 /* The src flags represent how things were used previously. This is
193 * what we use for doing flushes.
194 */
195 struct GENX(PIPE_CONTROL) flush_cmd = {
196 GENX(PIPE_CONTROL_header),
197 .PostSyncOperation = NoWrite,
198 };
199
200 for_each_bit(b, src_flags) {
201 switch ((VkAccessFlagBits)(1 << b)) {
202 case VK_ACCESS_SHADER_WRITE_BIT:
203 flush_cmd.DCFlushEnable = true;
204 break;
205 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
206 flush_cmd.RenderTargetCacheFlushEnable = true;
207 break;
208 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
209 flush_cmd.DepthCacheFlushEnable = true;
210 break;
211 case VK_ACCESS_TRANSFER_WRITE_BIT:
212 flush_cmd.RenderTargetCacheFlushEnable = true;
213 flush_cmd.DepthCacheFlushEnable = true;
214 break;
215 default:
216 unreachable("should've masked this out by now");
217 }
218 }
219
220 /* If we end up doing two PIPE_CONTROLs, the first, flushing one also has to
221 * stall and wait for the flushing to finish, so we don't re-dirty the
222 * caches with in-flight rendering after the second PIPE_CONTROL
223 * invalidates.
224 */
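/* For example, a barrier with srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
 * and dstAccessMask = VK_ACCESS_SHADER_READ_BIT comes out of this function as a
 * first PIPE_CONTROL with RenderTargetCacheFlushEnable and a CS stall, followed
 * by a second PIPE_CONTROL with TextureCacheInvalidationEnable.
 */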
225
226 if (dst_flags)
227 flush_cmd.CommandStreamerStallEnable = true;
228
229 if (src_flags && dst_flags) {
230 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
231 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
232 }
233
234 /* The dst flags represent how things will be used in the future. This
235 * is what we use for doing cache invalidations.
236 */
237 struct GENX(PIPE_CONTROL) invalidate_cmd = {
238 GENX(PIPE_CONTROL_header),
239 .PostSyncOperation = NoWrite,
240 };
241
242 for_each_bit(b, dst_flags) {
243 switch ((VkAccessFlagBits)(1 << b)) {
244 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
245 case VK_ACCESS_INDEX_READ_BIT:
246 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
247 invalidate_cmd.VFCacheInvalidationEnable = true;
248 break;
249 case VK_ACCESS_UNIFORM_READ_BIT:
250 invalidate_cmd.ConstantCacheInvalidationEnable = true;
251 /* fallthrough */
252 case VK_ACCESS_SHADER_READ_BIT:
253 invalidate_cmd.TextureCacheInvalidationEnable = true;
254 break;
255 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
256 invalidate_cmd.TextureCacheInvalidationEnable = true;
257 break;
258 case VK_ACCESS_TRANSFER_READ_BIT:
259 invalidate_cmd.TextureCacheInvalidationEnable = true;
260 break;
261 default:
262 unreachable("should've masked this out by now");
263 }
264 }
265
266 if (dst_flags) {
267 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
268 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
269 }
270 }
271
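/* The two helpers below bind a small driver-owned vertex buffer at the reserved
 * index 32 containing two dwords: the base vertex followed by the base instance.
 * With BufferPitch = 0 every vertex reads the same 8 bytes, which is presumably
 * how a vertex shader that uses gl_BaseVertex/gl_BaseInstance gets at these
 * values (the matching vertex elements are set up elsewhere in the pipeline).
 */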
272 static void
273 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
274 struct anv_bo *bo, uint32_t offset)
275 {
276 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
277 GENX(3DSTATE_VERTEX_BUFFERS));
278
279 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
280 &(struct GENX(VERTEX_BUFFER_STATE)) {
281 .VertexBufferIndex = 32, /* Reserved for this */
282 .AddressModifyEnable = true,
283 .BufferPitch = 0,
284 #if (GEN_GEN >= 8)
285 .MemoryObjectControlState = GENX(MOCS),
286 .BufferStartingAddress = { bo, offset },
287 .BufferSize = 8
288 #else
289 .VertexBufferMemoryObjectControlState = GENX(MOCS),
290 .BufferStartingAddress = { bo, offset },
291 .EndAddress = { bo, offset + 8 },
292 #endif
293 });
294 }
295
296 static void
297 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
298 uint32_t base_vertex, uint32_t base_instance)
299 {
300 struct anv_state id_state =
301 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
302
303 ((uint32_t *)id_state.map)[0] = base_vertex;
304 ((uint32_t *)id_state.map)[1] = base_instance;
305
306 if (!cmd_buffer->device->info.has_llc)
307 anv_state_clflush(id_state);
308
309 emit_base_vertex_instance_bo(cmd_buffer,
310 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
311 }
312
313 void genX(CmdDraw)(
314 VkCommandBuffer commandBuffer,
315 uint32_t vertexCount,
316 uint32_t instanceCount,
317 uint32_t firstVertex,
318 uint32_t firstInstance)
319 {
320 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
321 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
322 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
323
324 genX(cmd_buffer_flush_state)(cmd_buffer);
325
326 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
327 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
328
329 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
330 .VertexAccessType = SEQUENTIAL,
331 .PrimitiveTopologyType = pipeline->topology,
332 .VertexCountPerInstance = vertexCount,
333 .StartVertexLocation = firstVertex,
334 .InstanceCount = instanceCount,
335 .StartInstanceLocation = firstInstance,
336 .BaseVertexLocation = 0);
337 }
338
339 void genX(CmdDrawIndexed)(
340 VkCommandBuffer commandBuffer,
341 uint32_t indexCount,
342 uint32_t instanceCount,
343 uint32_t firstIndex,
344 int32_t vertexOffset,
345 uint32_t firstInstance)
346 {
347 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
348 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
349 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
350
351 genX(cmd_buffer_flush_state)(cmd_buffer);
352
353 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
354 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
355
356 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
357 .VertexAccessType = RANDOM,
358 .PrimitiveTopologyType = pipeline->topology,
359 .VertexCountPerInstance = indexCount,
360 .StartVertexLocation = firstIndex,
361 .InstanceCount = instanceCount,
362 .StartInstanceLocation = firstInstance,
363 .BaseVertexLocation = vertexOffset);
364 }
365
366 /* Auto-Draw / Indirect Registers */
367 #define GEN7_3DPRIM_END_OFFSET 0x2420
368 #define GEN7_3DPRIM_START_VERTEX 0x2430
369 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
370 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
371 #define GEN7_3DPRIM_START_INSTANCE 0x243C
372 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
373
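/* When IndirectParameterEnable is set, 3DPRIMITIVE takes its parameters from the
 * MMIO registers above instead of from the command itself. The
 * MI_LOAD_REGISTER_MEM offsets used below follow the Vulkan indirect command
 * layouts: VkDrawIndirectCommand is { vertexCount, instanceCount, firstVertex,
 * firstInstance } at byte offsets 0/4/8/12, and VkDrawIndexedIndirectCommand is
 * { indexCount, instanceCount, firstIndex, vertexOffset, firstInstance } at
 * 0/4/8/12/16.
 */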
374 static void
375 emit_lrm(struct anv_batch *batch,
376 uint32_t reg, struct anv_bo *bo, uint32_t offset)
377 {
378 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
379 .RegisterAddress = reg,
380 .MemoryAddress = { bo, offset });
381 }
382
383 static void
384 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
385 {
386 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
387 .RegisterOffset = reg,
388 .DataDWord = imm);
389 }
390
391 void genX(CmdDrawIndirect)(
392 VkCommandBuffer commandBuffer,
393 VkBuffer _buffer,
394 VkDeviceSize offset,
395 uint32_t drawCount,
396 uint32_t stride)
397 {
398 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
399 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
400 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
401 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
402 struct anv_bo *bo = buffer->bo;
403 uint32_t bo_offset = buffer->offset + offset;
404
405 genX(cmd_buffer_flush_state)(cmd_buffer);
406
407 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
408 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
409
410 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
411 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
412 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
413 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
414 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
415
416 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
417 .IndirectParameterEnable = true,
418 .VertexAccessType = SEQUENTIAL,
419 .PrimitiveTopologyType = pipeline->topology);
420 }
421
422 void genX(CmdDrawIndexedIndirect)(
423 VkCommandBuffer commandBuffer,
424 VkBuffer _buffer,
425 VkDeviceSize offset,
426 uint32_t drawCount,
427 uint32_t stride)
428 {
429 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
430 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
431 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
432 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
433 struct anv_bo *bo = buffer->bo;
434 uint32_t bo_offset = buffer->offset + offset;
435
436 genX(cmd_buffer_flush_state)(cmd_buffer);
437
438 /* TODO: We need to stomp base vertex to 0 somehow */
439 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
440 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
441
442 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
443 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
444 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
445 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
446 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
447
448 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
449 .IndirectParameterEnable = true,
450 .VertexAccessType = RANDOM,
451 .PrimitiveTopologyType = pipeline->topology);
452 }
453
454
455 void genX(CmdDispatch)(
456 VkCommandBuffer commandBuffer,
457 uint32_t x,
458 uint32_t y,
459 uint32_t z)
460 {
461 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
462 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
463 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
464
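/* If the shader reads gl_NumWorkGroups, stash the dispatch dimensions in dynamic
 * state and record where they live; presumably the surface that exposes them to
 * the shader is bound when compute state is flushed below.
 */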
465 if (prog_data->uses_num_work_groups) {
466 struct anv_state state =
467 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
468 uint32_t *sizes = state.map;
469 sizes[0] = x;
470 sizes[1] = y;
471 sizes[2] = z;
472 if (!cmd_buffer->device->info.has_llc)
473 anv_state_clflush(state);
474 cmd_buffer->state.num_workgroups_offset = state.offset;
475 cmd_buffer->state.num_workgroups_bo =
476 &cmd_buffer->device->dynamic_state_block_pool.bo;
477 }
478
479 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
480
481 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
482 .SIMDSize = prog_data->simd_size / 16,
483 .ThreadDepthCounterMaximum = 0,
484 .ThreadHeightCounterMaximum = 0,
485 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
486 .ThreadGroupIDXDimension = x,
487 .ThreadGroupIDYDimension = y,
488 .ThreadGroupIDZDimension = z,
489 .RightExecutionMask = pipeline->cs_right_mask,
490 .BottomExecutionMask = 0xffffffff);
491
492 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
493 }
494
495 #define GPGPU_DISPATCHDIMX 0x2500
496 #define GPGPU_DISPATCHDIMY 0x2504
497 #define GPGPU_DISPATCHDIMZ 0x2508
498
499 #define MI_PREDICATE_SRC0 0x2400
500 #define MI_PREDICATE_SRC1 0x2408
501
502 void genX(CmdDispatchIndirect)(
503 VkCommandBuffer commandBuffer,
504 VkBuffer _buffer,
505 VkDeviceSize offset)
506 {
507 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
508 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
509 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
510 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
511 struct anv_bo *bo = buffer->bo;
512 uint32_t bo_offset = buffer->offset + offset;
513 struct anv_batch *batch = &cmd_buffer->batch;
514
515 if (prog_data->uses_num_work_groups) {
516 cmd_buffer->state.num_workgroups_offset = bo_offset;
517 cmd_buffer->state.num_workgroups_bo = bo;
518 }
519
520 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
521
522 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
523 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
524 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
525
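/* The MI_PREDICATE sequence in the GEN_GEN <= 7 block below computes, per its
 * step-by-step comments, predicate = !(x == 0 || y == 0 || z == 0), so the
 * predicated GPGPU_WALKER only runs when all three dimensions are non-zero;
 * presumably older hardware cannot handle a zero-sized indirect dispatch
 * directly, hence the workaround.
 */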
526 #if GEN_GEN <= 7
527 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
528 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
529 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
530 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
531
532 /* Load compute_dispatch_indirect_x_size into SRC0 */
533 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
534
535 /* predicate = (compute_dispatch_indirect_x_size == 0); */
536 anv_batch_emit(batch, GENX(MI_PREDICATE),
537 .LoadOperation = LOAD_LOAD,
538 .CombineOperation = COMBINE_SET,
539 .CompareOperation = COMPARE_SRCS_EQUAL);
540
541 /* Load compute_dispatch_indirect_y_size into SRC0 */
542 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
543
544 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
545 anv_batch_emit(batch, GENX(MI_PREDICATE),
546 .LoadOperation = LOAD_LOAD,
547 .CombineOperation = COMBINE_OR,
548 .CompareOperation = COMPARE_SRCS_EQUAL);
549
550 /* Load compute_dispatch_indirect_z_size into SRC0 */
551 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
552
553 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
554 anv_batch_emit(batch, GENX(MI_PREDICATE),
555 .LoadOperation = LOAD_LOAD,
556 .CombineOperation = COMBINE_OR,
557 .CompareOperation = COMPARE_SRCS_EQUAL);
558
559 /* predicate = !predicate; */
560 #define COMPARE_FALSE 1
561 anv_batch_emit(batch, GENX(MI_PREDICATE),
562 .LoadOperation = LOAD_LOADINV,
563 .CombineOperation = COMBINE_OR,
564 .CompareOperation = COMPARE_FALSE);
565 #endif
566
567 anv_batch_emit(batch, GENX(GPGPU_WALKER),
568 .IndirectParameterEnable = true,
569 .PredicateEnable = GEN_GEN <= 7,
570 .SIMDSize = prog_data->simd_size / 16,
571 .ThreadDepthCounterMaximum = 0,
572 .ThreadHeightCounterMaximum = 0,
573 .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
574 .RightExecutionMask = pipeline->cs_right_mask,
575 .BottomExecutionMask = 0xffffffff);
576
577 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
578 }
579
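/* PIPELINE_SELECT switches the command streamer between the 3D and GPGPU/media
 * pipelines. On gen9+ the MaskBits field acts as a write-enable mask for the
 * selection bits, so it must be set for the selection to take effect.
 */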
580 void
581 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
582 {
583 if (cmd_buffer->state.current_pipeline != _3D) {
584 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
585 #if GEN_GEN >= 9
586 .MaskBits = 3,
587 #endif
588 .PipelineSelection = _3D);
589 cmd_buffer->state.current_pipeline = _3D;
590 }
591 }
592
593 static void
594 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
595 {
596 struct anv_device *device = cmd_buffer->device;
597 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
598 const struct anv_image_view *iview =
599 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
600 const struct anv_image *image = iview ? iview->image : NULL;
601 const struct anv_format *anv_format =
602 iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
603 const bool has_depth = iview && anv_format->has_depth;
604 const bool has_stencil = iview && anv_format->has_stencil;
605
606 /* FIXME: Implement the PMA stall W/A */
607 /* FIXME: Width and Height are wrong */
608
609 /* Emit 3DSTATE_DEPTH_BUFFER */
610 if (has_depth) {
611 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
612 .SurfaceType = SURFTYPE_2D,
613 .DepthWriteEnable = true,
614 .StencilWriteEnable = has_stencil,
615 .HierarchicalDepthBufferEnable = false,
616 .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
617 &image->depth_surface.isl),
618 .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
619 .SurfaceBaseAddress = {
620 .bo = image->bo,
621 .offset = image->offset + image->depth_surface.offset,
622 },
623 .Height = fb->height - 1,
624 .Width = fb->width - 1,
625 .LOD = 0,
626 .Depth = 1 - 1,
627 .MinimumArrayElement = 0,
628 .DepthBufferObjectControlState = GENX(MOCS),
629 #if GEN_GEN >= 8
630 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
631 #endif
632 .RenderTargetViewExtent = 1 - 1);
633 } else {
634 /* Even when no depth buffer is present, the hardware requires that
635 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
636 *
637 * If a null depth buffer is bound, the driver must instead bind depth as:
638 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
639 * 3DSTATE_DEPTH.Width = 1
640 * 3DSTATE_DEPTH.Height = 1
641 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
642 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
643 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
644 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
645 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
646 *
647 * The PRM is wrong, though. The width and height must be programmed to
648 * the actual framebuffer's width and height, even when neither depth buffer
649 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
650 * be combined with a stencil buffer so we use D32_FLOAT instead.
651 */
652 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
653 .SurfaceType = SURFTYPE_2D,
654 .SurfaceFormat = D32_FLOAT,
655 .Width = fb->width - 1,
656 .Height = fb->height - 1,
657 .StencilWriteEnable = has_stencil);
658 }
659
660 /* Emit 3DSTATE_STENCIL_BUFFER */
661 if (has_stencil) {
662 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
663 #if GEN_GEN >= 8 || GEN_IS_HASWELL
664 .StencilBufferEnable = true,
665 #endif
666 .StencilBufferObjectControlState = GENX(MOCS),
667
668 /* Stencil buffers have strange pitch. The PRM says:
669 *
670 * The pitch must be set to 2x the value computed based on width,
671 * as the stencil buffer is stored with two rows interleaved.
672 */
673 .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
674
675 #if GEN_GEN >= 8
676 .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
677 #endif
678 .SurfaceBaseAddress = {
679 .bo = image->bo,
680 .offset = image->offset + image->stencil_surface.offset,
681 });
682 } else {
683 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
684 }
685
686 /* Disable hierarchical depth buffers. */
687 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
688
689 /* Clear the clear params. */
690 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
691 }
692
693 /**
694 * @see anv_cmd_buffer_set_subpass()
695 */
696 void
697 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
698 struct anv_subpass *subpass)
699 {
700 cmd_buffer->state.subpass = subpass;
701
702 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
703
704 cmd_buffer_emit_depth_stencil(cmd_buffer);
705 }
706
707 void genX(CmdBeginRenderPass)(
708 VkCommandBuffer commandBuffer,
709 const VkRenderPassBeginInfo* pRenderPassBegin,
710 VkSubpassContents contents)
711 {
712 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
713 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
714 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
715
716 cmd_buffer->state.framebuffer = framebuffer;
717 cmd_buffer->state.pass = pass;
718 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
719
720 genX(flush_pipeline_select_3d)(cmd_buffer);
721
722 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
723
724 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
725 .ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0),
726 .ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0),
727 .ClippedDrawingRectangleYMax =
728 render_area->offset.y + render_area->extent.height - 1,
729 .ClippedDrawingRectangleXMax =
730 render_area->offset.x + render_area->extent.width - 1,
731 .DrawingRectangleOriginY = 0,
732 .DrawingRectangleOriginX = 0);
733
734 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
735 anv_cmd_buffer_clear_subpass(cmd_buffer);
736 }
737
738 void genX(CmdNextSubpass)(
739 VkCommandBuffer commandBuffer,
740 VkSubpassContents contents)
741 {
742 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
743
744 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
745
746 anv_cmd_buffer_resolve_subpass(cmd_buffer);
747 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
748 anv_cmd_buffer_clear_subpass(cmd_buffer);
749 }
750
751 void genX(CmdEndRenderPass)(
752 VkCommandBuffer commandBuffer)
753 {
754 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
755
756 anv_cmd_buffer_resolve_subpass(cmd_buffer);
757 }
758
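/* As used below, an occlusion query slot holds three 64-bit values at byte
 * offsets 0, 8 and 16: the PS_DEPTH_COUNT at vkCmdBeginQuery, the
 * PS_DEPTH_COUNT at vkCmdEndQuery, and an availability word written once the
 * end value has landed.
 */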
759 static void
760 emit_ps_depth_count(struct anv_batch *batch,
761 struct anv_bo *bo, uint32_t offset)
762 {
763 anv_batch_emit(batch, GENX(PIPE_CONTROL),
764 .DestinationAddressType = DAT_PPGTT,
765 .PostSyncOperation = WritePSDepthCount,
766 .DepthStallEnable = true,
767 .Address = { bo, offset });
768 }
769
770 static void
771 emit_query_availability(struct anv_batch *batch,
772 struct anv_bo *bo, uint32_t offset)
773 {
774 anv_batch_emit(batch, GENX(PIPE_CONTROL),
775 .DestinationAddressType = DAT_PPGTT,
776 .PostSyncOperation = WriteImmediateData,
777 .Address = { bo, offset },
778 .ImmediateData = 1);
779 }
780
781 void genX(CmdBeginQuery)(
782 VkCommandBuffer commandBuffer,
783 VkQueryPool queryPool,
784 uint32_t query,
785 VkQueryControlFlags flags)
786 {
787 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
788 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
789
790 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
791 * that the pipelining of the depth write breaks. What we see is that
792 * samples from the render pass clear leak into the first query
793 * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
794 * operation and DepthStallEnable seems to work around the issue.
795 */
796 if (cmd_buffer->state.need_query_wa) {
797 cmd_buffer->state.need_query_wa = false;
798 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
799 .DepthCacheFlushEnable = true,
800 .DepthStallEnable = true);
801 }
802
803 switch (pool->type) {
804 case VK_QUERY_TYPE_OCCLUSION:
805 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
806 query * sizeof(struct anv_query_pool_slot));
807 break;
808
809 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
810 default:
811 unreachable("");
812 }
813 }
814
815 void genX(CmdEndQuery)(
816 VkCommandBuffer commandBuffer,
817 VkQueryPool queryPool,
818 uint32_t query)
819 {
820 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
821 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
822
823 switch (pool->type) {
824 case VK_QUERY_TYPE_OCCLUSION:
825 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
826 query * sizeof(struct anv_query_pool_slot) + 8);
827
828 emit_query_availability(&cmd_buffer->batch, &pool->bo,
829 query * sizeof(struct anv_query_pool_slot) + 16);
830 break;
831
832 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
833 default:
834 unreachable("");
835 }
836 }
837
838 #define TIMESTAMP 0x2358
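/* TIMESTAMP is the MMIO offset of the command streamer's free-running 64-bit
 * timestamp register; the top-of-pipe path below captures it directly with two
 * MI_STORE_REGISTER_MEMs (low dword, then high dword).
 */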
839
840 void genX(CmdWriteTimestamp)(
841 VkCommandBuffer commandBuffer,
842 VkPipelineStageFlagBits pipelineStage,
843 VkQueryPool queryPool,
844 uint32_t query)
845 {
846 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
847 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
848 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
849
850 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
851
852 switch (pipelineStage) {
853 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
854 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
855 .RegisterAddress = TIMESTAMP,
856 .MemoryAddress = { &pool->bo, offset });
857 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
858 .RegisterAddress = TIMESTAMP + 4,
859 .MemoryAddress = { &pool->bo, offset + 4 });
860 break;
861
862 default:
863 /* Everything else is bottom-of-pipe */
864 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
865 .DestinationAddressType = DAT_PPGTT,
866 .PostSyncOperation = WriteTimestamp,
867 .Address = { &pool->bo, offset });
868 break;
869 }
870
871 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
872 }
873
874 #if GEN_GEN > 7 || GEN_IS_HASWELL
875
876 #define alu_opcode(v) __gen_uint((v), 20, 31)
877 #define alu_operand1(v) __gen_uint((v), 10, 19)
878 #define alu_operand2(v) __gen_uint((v), 0, 9)
879 #define alu(opcode, operand1, operand2) \
880 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
881
882 #define OPCODE_NOOP 0x000
883 #define OPCODE_LOAD 0x080
884 #define OPCODE_LOADINV 0x480
885 #define OPCODE_LOAD0 0x081
886 #define OPCODE_LOAD1 0x481
887 #define OPCODE_ADD 0x100
888 #define OPCODE_SUB 0x101
889 #define OPCODE_AND 0x102
890 #define OPCODE_OR 0x103
891 #define OPCODE_XOR 0x104
892 #define OPCODE_STORE 0x180
893 #define OPCODE_STOREINV 0x580
894
895 #define OPERAND_R0 0x00
896 #define OPERAND_R1 0x01
897 #define OPERAND_R2 0x02
898 #define OPERAND_R3 0x03
899 #define OPERAND_R4 0x04
900 #define OPERAND_SRCA 0x20
901 #define OPERAND_SRCB 0x21
902 #define OPERAND_ACCU 0x31
903 #define OPERAND_ZF 0x32
904 #define OPERAND_CF 0x33
905
906 #define CS_GPR(n) (0x2600 + (n) * 8)
907
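/* CS_GPR(n) is the MMIO offset of the command streamer's 64-bit general-purpose
 * register n, which is why the helpers below load and store the low and high
 * dwords separately. Each MI_MATH instruction dword packs
 * opcode[31:20] | operand1[19:10] | operand2[9:0], exactly what the alu() macro
 * above assembles.
 */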
908 static void
909 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
910 struct anv_bo *bo, uint32_t offset)
911 {
912 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
913 .RegisterAddress = reg,
914 .MemoryAddress = { bo, offset });
915 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
916 .RegisterAddress = reg + 4,
917 .MemoryAddress = { bo, offset + 4 });
918 }
919
920 static void
921 store_query_result(struct anv_batch *batch, uint32_t reg,
922 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
923 {
924 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
925 .RegisterAddress = reg,
926 .MemoryAddress = { bo, offset });
927
928 if (flags & VK_QUERY_RESULT_64_BIT)
929 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
930 .RegisterAddress = reg + 4,
931 .MemoryAddress = { bo, offset + 4 });
932 }
933
934 void genX(CmdCopyQueryPoolResults)(
935 VkCommandBuffer commandBuffer,
936 VkQueryPool queryPool,
937 uint32_t firstQuery,
938 uint32_t queryCount,
939 VkBuffer destBuffer,
940 VkDeviceSize destOffset,
941 VkDeviceSize destStride,
942 VkQueryResultFlags flags)
943 {
944 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
945 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
946 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
947 uint32_t slot_offset, dst_offset;
948
949 if (flags & VK_QUERY_RESULT_WAIT_BIT)
950 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
951 .CommandStreamerStallEnable = true,
952 .StallAtPixelScoreboard = true);
953
954 dst_offset = buffer->offset + destOffset;
955 for (uint32_t i = 0; i < queryCount; i++) {
956
957 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
958 switch (pool->type) {
959 case VK_QUERY_TYPE_OCCLUSION:
960 emit_load_alu_reg_u64(&cmd_buffer->batch,
961 CS_GPR(0), &pool->bo, slot_offset);
962 emit_load_alu_reg_u64(&cmd_buffer->batch,
963 CS_GPR(1), &pool->bo, slot_offset + 8);
964
965 /* FIXME: We need to clamp the result for 32 bit. */
966
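/* R0 and R1 were loaded above with the begin and end PS_DEPTH_COUNT values for
 * this slot; the MI_MATH below computes R2 = R1 - R0, i.e. the number of
 * samples that passed the depth test while the query was active.
 */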
967 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
968 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
969 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
970 dw[3] = alu(OPCODE_SUB, 0, 0);
971 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
972 break;
973
974 case VK_QUERY_TYPE_TIMESTAMP:
975 emit_load_alu_reg_u64(&cmd_buffer->batch,
976 CS_GPR(2), &pool->bo, slot_offset);
977 break;
978
979 default:
980 unreachable("unhandled query type");
981 }
982
983 store_query_result(&cmd_buffer->batch,
984 CS_GPR(2), buffer->bo, dst_offset, flags);
985
986 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
987 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
988 &pool->bo, slot_offset + 16);
989 if (flags & VK_QUERY_RESULT_64_BIT)
990 store_query_result(&cmd_buffer->batch,
991 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
992 else
993 store_query_result(&cmd_buffer->batch,
994 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
995 }
996
997 dst_offset += destStride;
998 }
999 }
1000
1001 #endif