gen8/cmd_buffer: Properly return flushed push constant stages
[mesa.git] src/intel/vulkan/gen8_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

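/* Flush dirty push constants for each graphics stage and return the mask of
 * stages that were actually flushed.  The caller uses that mask to re-emit
 * the corresponding descriptor pointers, which on SKL+ is what makes new
 * push constants take effect (see the ordering comment in
 * genX(cmd_buffer_flush_state) below).  The sub-opcodes select
 * 3DSTATE_CONSTANT_{VS,HS,DS,GS,PS}; compute is skipped here and handled via
 * MEDIA_CURBE_LOAD in flush_compute_descriptor_set().
 */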
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage]);
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
                        ._3DCommandSubOpcode = push_constant_opcodes[stage],
                        .ConstantBody = {
                           .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
                           .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                        });
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}

#if GEN_GEN == 8
static void
emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t count, const VkViewport *viewports)
{
   struct anv_state sf_clip_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
   struct anv_state cc_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

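   /* Each gen8 SF_CLIP_VIEWPORT entry is 16 dwords (64 bytes) and each
    * CC_VIEWPORT entry is 2 dwords (8 bytes), which is where the count * 64
    * and count * 8 allocation sizes above and the i * 64 and i * 8 strides
    * below come from.
    */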
   for (uint32_t i = 0; i < count; i++) {
      const VkViewport *vp = &viewports[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
      struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = 1.0,
         .ViewportMatrixElementm30 = vp->x + vp->width / 2,
         .ViewportMatrixElementm31 = vp->y + vp->height / 2,
         .ViewportMatrixElementm32 = 0.0,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->x,
         .XMaxViewPort = vp->x + vp->width - 1,
         .YMinViewPort = vp->y,
         .YMaxViewPort = vp->y + vp->height - 1,
      };

      struct GENX(CC_VIEWPORT) cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
                                  &sf_clip_viewport);
      GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 8, &cc_viewport);
   }

   if (!cmd_buffer->device->info.has_llc) {
      anv_state_clflush(sf_clip_state);
      anv_state_clflush(cc_state);
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
                  .CCViewportPointer = cc_state.offset);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
                  .SFClipViewportPointer = sf_clip_state.offset);
}

void
gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.viewport.count > 0) {
      emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
   } else {
      /* If viewport count is 0, this is taken to mean "use the default" */
      emit_viewport_state(cmd_buffer, 1,
                          &(VkViewport) {
                             .x = 0.0f,
                             .y = 0.0f,
                             .width = cmd_buffer->state.framebuffer->width,
                             .height = cmd_buffer->state.framebuffer->height,
                             .minDepth = 0.0f,
                             .maxDepth = 1.0f,
                          });
   }
}
#endif

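/* Load an immediate value into an MMIO register using MI_LOAD_REGISTER_IMM;
 * config_l3() below uses this to write the L3 configuration.
 */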
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

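/* MMIO address of the gen8 L3 cache control register.  The magic values
 * written below pack the per-client way allocations (URB, DC, RO, SLM); see
 * the i965 gen7_l3_state.c reference in config_l3().
 */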
#define GEN8_L3CNTLREG 0x7034

static void
config_l3(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
   /* References for GL state:
    *
    * - commits e307cfa..228d5a3
    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
    */

   uint32_t val = enable_slm ?
      /* All = 48 ways; URB = 16 ways; DC and RO = 0, SLM = 1 */
      0x60000021 :
      /* All = 48 ways; URB = 48 ways; DC, RO and SLM = 0 */
      0x60000060;
   bool changed = cmd_buffer->state.current_l3_config != val;

   if (changed) {
      /* According to the hardware docs, the L3 partitioning can only be
       * changed while the pipeline is completely drained and the caches are
       * flushed, which involves a first PIPE_CONTROL flush which stalls the
       * pipeline and initiates invalidation of the relevant caches...
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .TextureCacheInvalidationEnable = true,
                     .ConstantCacheInvalidationEnable = true,
                     .InstructionCacheInvalidateEnable = true,
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      /* ...followed by a second stalling flush which guarantees that
       * invalidation is complete when the L3 configuration registers are
       * modified.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DCFlushEnable = true,
                     .PostSyncOperation = NoWrite,
                     .CommandStreamerStallEnable = true);

      emit_lri(&cmd_buffer->batch, GEN8_L3CNTLREG, val);
      cmd_buffer->state.current_l3_config = val;
   }
}

static void
__emit_genx_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
   /* FIXME: gen9.fs */
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

#include "genxml/gen9_pack.h"
static void
__emit_gen9_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t sf_dw[GENX(3DSTATE_SF_length)];
   struct GEN9_3DSTATE_SF sf = {
      GEN9_3DSTATE_SF_header,
      .LineWidth = cmd_buffer->state.dynamic.line_width,
   };
   GEN9_3DSTATE_SF_pack(NULL, sf_dw, &sf);
   /* FIXME: gen9.fs */
   anv_batch_emit_merge(&cmd_buffer->batch, sf_dw,
                        cmd_buffer->state.pipeline->gen8.sf);
}

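/* Cherryview is a gen8 part, but its 3DSTATE_SF packs the line width the
 * way gen9 does, so pick the gen9 path for it at runtime.
 */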
static void
__emit_sf_state(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->device->info.is_cherryview)
      __emit_gen9_sf_state(cmd_buffer);
   else
      __emit_genx_sf_state(cmd_buffer);
}

void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   config_l3(cmd_buffer, false);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = GENX(MOCS),
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty)
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
      __emit_sf_state(cmd_buffer);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
      struct GENX(3DSTATE_RASTER) raster = {
         GENX(3DSTATE_RASTER_header),
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
      anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
                           pipeline->gen8.raster);
   }

   /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
    * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
    * across different state packets for gen8 and gen9. We handle that by
    * using a big old #if switch here.
    */
#if GEN_GEN == 8
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front,
         .BackFaceStencilReferenceValue = d->stencil_reference.back,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS),
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t wm_depth_stencil_dw[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = {
         GENX(3DSTATE_WM_DEPTH_STENCIL_header),

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, wm_depth_stencil_dw,
                                          &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
                           pipeline->gen8.wm_depth_stencil);
   }
#else
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN9_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN9_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
      };
      GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN9_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset,
                     .ColorCalcStatePointerValid = true);
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
         GEN9_3DSTATE_WM_DEPTH_STENCIL_header,

         .StencilBufferWriteEnable = d->stencil_write_mask.front != 0 ||
                                     d->stencil_write_mask.back != 0,

         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,

         .StencilReferenceValue = d->stencil_reference.front,
         .BackfaceStencilReferenceValue = d->stencil_reference.back
      };
      GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);

      anv_batch_emit_merge(&cmd_buffer->batch, dwords,
                           pipeline->gen9.wm_depth_stencil);
   }
#endif

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
                     .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                     .CutIndex = cmd_buffer->state.restart_index);
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   static const uint32_t restart_index_for_type[] = {
      [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
      [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
   };

   cmd_buffer->state.restart_index = restart_index_for_type[indexType];

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = GENX(MOCS),
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   unsigned push_constant_data_size =
      (prog_data->nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs = reg_aligned_constant_size / 32;

   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
                     .CURBETotalDataLength = push_state.alloc_size,
                     .CURBEDataStartAddress = push_state.offset);
   }

   assert(prog_data->total_shared <= 64 * 1024);
   uint32_t slm_size = 0;
   if (prog_data->total_shared > 0) {
      /* slm_size is in 4k increments, but must be a power of 2. */
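      /* For example, total_shared = 5 KiB rounds up to slm_size = 8192
       * bytes, and 8192 / 4096 = 2 is what SharedLocalMemorySize gets
       * programmed to below.
       */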
      slm_size = 4 * 1024;
      while (slm_size < prog_data->total_shared)
         slm_size <<= 1;
      slm_size /= 4 * 1024;
   }

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .KernelStartPointerHigh = 0,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
                          .SamplerCount = 0,
                          .ConstantIndirectURBEntryReadLength = push_constant_regs,
                          .ConstantURBEntryReadOffset = 0,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   bool needs_slm = pipeline->cs_prog_data.base.total_shared > 0;
   config_l3(cmd_buffer, needs_slm);

   if (cmd_buffer->state.current_pipeline != GPGPU) {
#if GEN_GEN < 10
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *    "Software must clear the COLOR_CALC_STATE Valid field in
       *    3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *    with Pipeline Select set to GPGPU."
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_CC_STATE_POINTERS));
#endif

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
#if GEN_GEN >= 9
                     .MaskBits = 3,
#endif
                     .PipelineSelection = GPGPU);
      cmd_buffer->state.current_pipeline = GPGPU;
   }

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;
}

static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .DepthStallEnable = true,
                  .Address = { bo, offset });
}

static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = { bo, offset },
                  .ImmediateData = 1);
}

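/* Occlusion query slot layout as used below (inferred from the offsets in
 * this file rather than from anv_private.h): the begin-query depth count is
 * written at byte 0, the end-query depth count at byte 8 and the
 * availability word at byte 16 of each struct anv_query_pool_slot.
 */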
void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DepthCacheFlushEnable = true,
                     .DepthStallEnable = true);
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

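/* MMIO address of the render command streamer's 64-bit timestamp register.
 * Top-of-pipe timestamps read it directly with two MI_STORE_REGISTER_MEM
 * dwords; everything else goes through a PIPE_CONTROL WriteTimestamp
 * post-sync operation below.
 */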
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { &pool->bo, offset });
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = TIMESTAMP + 4,
                     .MemoryAddress = { &pool->bo, offset + 4 });
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = { &pool->bo, offset });
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}

#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v),  0,  9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP     0x000
#define OPCODE_LOAD     0x080
#define OPCODE_LOADINV  0x480
#define OPCODE_LOAD0    0x081
#define OPCODE_LOAD1    0x481
#define OPCODE_ADD      0x100
#define OPCODE_SUB      0x101
#define OPCODE_AND      0x102
#define OPCODE_OR       0x103
#define OPCODE_XOR      0x104
#define OPCODE_STORE    0x180
#define OPCODE_STOREINV 0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
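
/* Each MI_MATH ALU instruction is a single dword: the opcode lands in bits
 * 31:20 and the two operands in bits 19:10 and 9:0 (see the __gen_uint
 * ranges above).  CS_GPR(n) is the address of the command streamer's 64-bit
 * general purpose register n.  For example:
 *
 *    dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);   // SRCA <- GPR1
 */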

static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
                  .RegisterAddress = reg + 4,
                  .MemoryAddress = { bo, offset + 4 });
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });

   if (flags & VK_QUERY_RESULT_64_BIT)
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM),
                     .RegisterAddress = reg + 4,
                     .MemoryAddress = { bo, offset + 4 });
}

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT)
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                     .CommandStreamerStallEnable = true,
                     .StallAtPixelScoreboard = true);

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

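         /* MI_MATH program (a sketch of what the dwords below encode): load
          * the end count (GPR1) into SRCA and the begin count (GPR0) into
          * SRCB, subtract (SRCA - SRCB lands in the accumulator), then store
          * the accumulator to GPR2 for store_query_result() below.
          */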
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_SET);
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     _event,
    VkPipelineStageFlags                        stageMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WriteImmediateData,
                  .Address = {
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     event->state.offset
                  },
                  .ImmediateData = VK_EVENT_RESET);
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   for (uint32_t i = 0; i < eventCount; i++) {
      ANV_FROM_HANDLE(anv_event, event, pEvents[i]);

      anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
                     .WaitMode = PollingMode,
                     .CompareOperation = COMPARE_SAD_EQUAL_SDD,
                     .SemaphoreDataDword = VK_EVENT_SET,
                     .SemaphoreAddress = {
                        &cmd_buffer->device->dynamic_state_block_pool.bo,
                        event->state.offset
                     });
   }

   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}