intel/blorp: Use 3DSTATE_CONSTANT_ALL to setup push constants.
[mesa.git] / src / intel / blorp / blorp_genX_exec.h
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef BLORP_GENX_EXEC_H
25 #define BLORP_GENX_EXEC_H
26
27 #include "blorp_priv.h"
28 #include "dev/gen_device_info.h"
29 #include "common/gen_sample_positions.h"
30 #include "genxml/gen_macros.h"
31
32 /**
33 * This file provides the blorp pipeline setup and execution functionality.
34 * It defines the following function:
35 *
36 * static void
37 * blorp_exec(struct blorp_context *blorp, void *batch_data,
38 * const struct blorp_params *params);
39 *
40 * It is the job of whoever includes this header to wrap this in something
41 * to get an externally visible symbol.
42 *
43 * In order for the blorp_exec function to work, the driver must provide
44 * implementations of the following static helper functions.
45 */
46
47 static void *
48 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
49
50 static uint64_t
51 blorp_emit_reloc(struct blorp_batch *batch,
52 void *location, struct blorp_address address, uint32_t delta);
53
54 static void *
55 blorp_alloc_dynamic_state(struct blorp_batch *batch,
56 uint32_t size,
57 uint32_t alignment,
58 uint32_t *offset);
59 static void *
60 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
61 struct blorp_address *addr);
62 static void
63 blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
64 const struct blorp_address *addrs,
65 unsigned num_vbs);
66
67 #if GEN_GEN >= 8
68 static struct blorp_address
69 blorp_get_workaround_page(struct blorp_batch *batch);
70 #endif
71
72 static void
73 blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
74 unsigned state_size, unsigned state_alignment,
75 uint32_t *bt_offset, uint32_t *surface_offsets,
76 void **surface_maps);
77
78 static void
79 blorp_flush_range(struct blorp_batch *batch, void *start, size_t size);
80
81 static void
82 blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
83 struct blorp_address address, uint32_t delta);
84
85 static uint64_t
86 blorp_get_surface_address(struct blorp_batch *batch,
87 struct blorp_address address);
88
89 #if GEN_GEN >= 7 && GEN_GEN < 10
90 static struct blorp_address
91 blorp_get_surface_base_address(struct blorp_batch *batch);
92 #endif
93
94 static void
95 blorp_emit_urb_config(struct blorp_batch *batch,
96 unsigned vs_entry_size, unsigned sf_entry_size);
97
98 static void
99 blorp_emit_pipeline(struct blorp_batch *batch,
100 const struct blorp_params *params);
101
102 /***** BEGIN blorp_exec implementation ******/
103
104 static uint64_t
105 _blorp_combine_address(struct blorp_batch *batch, void *location,
106 struct blorp_address address, uint32_t delta)
107 {
108 if (address.buffer == NULL) {
109 return address.offset + delta;
110 } else {
111 return blorp_emit_reloc(batch, location, address, delta);
112 }
113 }
114
115 #define __gen_address_type struct blorp_address
116 #define __gen_user_data struct blorp_batch
117 #define __gen_combine_address _blorp_combine_address
118
119 #include "genxml/genX_pack.h"
120
121 #define _blorp_cmd_length(cmd) cmd ## _length
122 #define _blorp_cmd_length_bias(cmd) cmd ## _length_bias
123 #define _blorp_cmd_header(cmd) cmd ## _header
124 #define _blorp_cmd_pack(cmd) cmd ## _pack
125
126 #define blorp_emit(batch, cmd, name) \
127 for (struct cmd name = { _blorp_cmd_header(cmd) }, \
128 *_dst = blorp_emit_dwords(batch, _blorp_cmd_length(cmd)); \
129 __builtin_expect(_dst != NULL, 1); \
130 _blorp_cmd_pack(cmd)(batch, (void *)_dst, &name), \
131 _dst = NULL)
132
133 #define blorp_emitn(batch, cmd, n, ...) ({ \
134 uint32_t *_dw = blorp_emit_dwords(batch, n); \
135 if (_dw) { \
136 struct cmd template = { \
137 _blorp_cmd_header(cmd), \
138 .DWordLength = n - _blorp_cmd_length_bias(cmd), \
139 __VA_ARGS__ \
140 }; \
141 _blorp_cmd_pack(cmd)(batch, _dw, &template); \
142 } \
143 _dw ? _dw + 1 : NULL; /* Array starts at dw[1] */ \
144 })
145
146 #define STRUCT_ZERO(S) ({ struct S t; memset(&t, 0, sizeof(t)); t; })
147
148 #define blorp_emit_dynamic(batch, state, name, align, offset) \
149 for (struct state name = STRUCT_ZERO(state), \
150 *_dst = blorp_alloc_dynamic_state(batch, \
151 _blorp_cmd_length(state) * 4, \
152 align, offset); \
153 __builtin_expect(_dst != NULL, 1); \
154 _blorp_cmd_pack(state)(batch, (void *)_dst, &name), \
155 blorp_flush_range(batch, _dst, _blorp_cmd_length(state) * 4), \
156 _dst = NULL)
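/* Typical usage of these emit helpers, mirroring the emit calls later in
 * this file (an illustrative sketch only): the macro allocates space in the
 * batch, the block body fills in the template fields, and the command is
 * packed when the block exits.
 *
 *    blorp_emit(batch, GENX(3DSTATE_SAMPLE_MASK), mask) {
 *       mask.SampleMask = (1 << params->num_samples) - 1;
 *    }
 */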
157
158 /* 3DSTATE_URB
159 * 3DSTATE_URB_VS
160 * 3DSTATE_URB_HS
161 * 3DSTATE_URB_DS
162 * 3DSTATE_URB_GS
163 *
164 * Assign the entire URB to the VS. Even though the VS is disabled, URB space
165 * is still needed because the clipper loads the VUEs from the URB. From
166 * the Sandybridge PRM, Volume 2, Part 1, Section 3DSTATE,
167 * Dword 1.15:0 "VS Number of URB Entries":
168 * This field is always used (even if VS Function Enable is DISABLED).
169 *
170 * The warning below appears in the PRM (Section 3DSTATE_URB), but we can
171 * safely ignore it because this batch contains only one draw call.
172 * Because of URB corruption caused by allocating a previous GS unit
173 * URB entry to the VS unit, software is required to send a “GS NULL
174 * Fence” (Send URB fence with VS URB size == 1 and GS URB size == 0)
175 * plus a dummy DRAW call before any case where VS will be taking over
176 * GS URB space.
177 *
178 * If 3DSTATE_URB_VS is emitted, then the others must be emitted as well.
179 * From the Ivybridge PRM, Volume 2 Part 1, section 1.7.1 3DSTATE_URB_VS:
180 *
181 * 3DSTATE_URB_HS, 3DSTATE_URB_DS, and 3DSTATE_URB_GS must also be
182 * programmed in order for the programming of this state to be
183 * valid.
184 */
185 static void
186 emit_urb_config(struct blorp_batch *batch,
187 const struct blorp_params *params)
188 {
189 /* Once the vertex fetcher has written full VUE entries with a complete
190 * header, the space requirement per vertex is as follows (in bytes):
191 *
192 * Header Position Program constants
193 * +--------+------------+-------------------+
194 * | 16 | 16 | n x 16 |
195 * +--------+------------+-------------------+
196 *
197 * where 'n' stands for number of varying inputs expressed as vec4s.
198 */
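   /* For example (a sketch of the arithmetic below): a blit with a single
    * vec4 varying needs 16 + 16 + 1 * 16 = 48 bytes per vertex, which
    * DIV_ROUND_UP() rounds up to one 64-byte URB allocation unit.
    */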
199 const unsigned num_varyings =
200 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
201 const unsigned total_needed = 16 + 16 + num_varyings * 16;
202
203 /* The URB size is expressed in units of 64 bytes (512 bits) */
204 const unsigned vs_entry_size = DIV_ROUND_UP(total_needed, 64);
205
206 const unsigned sf_entry_size =
207 params->sf_prog_data ? params->sf_prog_data->urb_entry_size : 0;
208
209 blorp_emit_urb_config(batch, vs_entry_size, sf_entry_size);
210 }
211
212 #if GEN_GEN >= 7
213 static void
214 blorp_emit_memcpy(struct blorp_batch *batch,
215 struct blorp_address dst,
216 struct blorp_address src,
217 uint32_t size);
218 #endif
219
220 static void
221 blorp_emit_vertex_data(struct blorp_batch *batch,
222 const struct blorp_params *params,
223 struct blorp_address *addr,
224 uint32_t *size)
225 {
226 const float vertices[] = {
227 /* v0 */ (float)params->x1, (float)params->y1, params->z,
228 /* v1 */ (float)params->x0, (float)params->y1, params->z,
229 /* v2 */ (float)params->x0, (float)params->y0, params->z,
230 };
231
232 void *data = blorp_alloc_vertex_buffer(batch, sizeof(vertices), addr);
233 memcpy(data, vertices, sizeof(vertices));
234 *size = sizeof(vertices);
235 blorp_flush_range(batch, data, *size);
236 }
237
238 static void
239 blorp_emit_input_varying_data(struct blorp_batch *batch,
240 const struct blorp_params *params,
241 struct blorp_address *addr,
242 uint32_t *size)
243 {
244 const unsigned vec4_size_in_bytes = 4 * sizeof(float);
245 const unsigned max_num_varyings =
246 DIV_ROUND_UP(sizeof(params->wm_inputs), vec4_size_in_bytes);
247 const unsigned num_varyings =
248 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
249
250 *size = 16 + num_varyings * vec4_size_in_bytes;
251
252 const uint32_t *const inputs_src = (const uint32_t *)&params->wm_inputs;
253 void *data = blorp_alloc_vertex_buffer(batch, *size, addr);
254 uint32_t *inputs = data;
255
256 /* Copy in the VS inputs */
257 assert(sizeof(params->vs_inputs) == 16);
258 memcpy(inputs, &params->vs_inputs, sizeof(params->vs_inputs));
259 inputs += 4;
260
261 if (params->wm_prog_data) {
262 /* Walk over the attribute slots; determine whether each attribute is used
263 * by the program and, when necessary, copy the values from the input
264 * storage to the vertex data buffer.
265 */
266 for (unsigned i = 0; i < max_num_varyings; i++) {
267 const gl_varying_slot attr = VARYING_SLOT_VAR0 + i;
268
269 const int input_index = params->wm_prog_data->urb_setup[attr];
270 if (input_index < 0)
271 continue;
272
273 memcpy(inputs, inputs_src + i * 4, vec4_size_in_bytes);
274
275 inputs += 4;
276 }
277 }
278
279 blorp_flush_range(batch, data, *size);
280
281 if (params->dst_clear_color_as_input) {
282 #if GEN_GEN >= 7
283 /* In this case, the clear color isn't known statically and instead
284 * comes in through an indirect which we have to copy into the vertex
285 * buffer before we execute the 3DPRIMITIVE. We already copied the
286 * value of params->wm_inputs.clear_color into the vertex buffer in the
287 * loop above. Now we emit code to stomp it from the GPU with the
288 * actual clear color value.
289 */
290 assert(num_varyings == 1);
291
292 /* The clear color is the first thing after the header */
293 struct blorp_address clear_color_input_addr = *addr;
294 clear_color_input_addr.offset += 16;
295
296 const unsigned clear_color_size =
297 GEN_GEN < 10 ? batch->blorp->isl_dev->ss.clear_value_size : 4 * 4;
298 blorp_emit_memcpy(batch, clear_color_input_addr,
299 params->dst.clear_color_addr,
300 clear_color_size);
301 #else
302 unreachable("MCS partial resolve is not a thing on SNB and earlier");
303 #endif
304 }
305 }
306
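/* Fill out a single VERTEX_BUFFER_STATE entry.  The buffer size is encoded
 * differently per generation: as an explicit BufferSize on Gen8+, as an
 * inclusive EndAddress on Gen5-7, and as MaxIndex on Gen4.
 */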
307 static void
308 blorp_fill_vertex_buffer_state(struct blorp_batch *batch,
309 struct GENX(VERTEX_BUFFER_STATE) *vb,
310 unsigned idx,
311 struct blorp_address addr, uint32_t size,
312 uint32_t stride)
313 {
314 vb[idx].VertexBufferIndex = idx;
315 vb[idx].BufferStartingAddress = addr;
316 vb[idx].BufferPitch = stride;
317
318 #if GEN_GEN >= 6
319 vb[idx].MOCS = addr.mocs;
320 #endif
321
322 #if GEN_GEN >= 7
323 vb[idx].AddressModifyEnable = true;
324 #endif
325
326 #if GEN_GEN >= 8
327 vb[idx].BufferSize = size;
328 #elif GEN_GEN >= 5
329 vb[idx].BufferAccessType = stride > 0 ? VERTEXDATA : INSTANCEDATA;
330 vb[idx].EndAddress = vb[idx].BufferStartingAddress;
331 vb[idx].EndAddress.offset += size - 1;
332 #elif GEN_GEN == 4
333 vb[idx].BufferAccessType = stride > 0 ? VERTEXDATA : INSTANCEDATA;
334 vb[idx].MaxIndex = stride > 0 ? size / stride : 0;
335 #endif
336 }
337
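/* Emit 3DSTATE_VERTEX_BUFFERS with two buffers: buffer 0 holds the three
 * RECTLIST vertex positions (12-byte stride) and buffer 1 holds the VUE
 * header data plus the flat inputs (zero stride, so every vertex sees the
 * same data).
 */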
338 static void
339 blorp_emit_vertex_buffers(struct blorp_batch *batch,
340 const struct blorp_params *params)
341 {
342 struct GENX(VERTEX_BUFFER_STATE) vb[3];
343 uint32_t num_vbs = 2;
344 memset(vb, 0, sizeof(vb));
345
346 struct blorp_address addrs[2] = {};
347 uint32_t size;
348 blorp_emit_vertex_data(batch, params, &addrs[0], &size);
349 blorp_fill_vertex_buffer_state(batch, vb, 0, addrs[0], size,
350 3 * sizeof(float));
351
352 blorp_emit_input_varying_data(batch, params, &addrs[1], &size);
353 blorp_fill_vertex_buffer_state(batch, vb, 1, addrs[1], size, 0);
354
355 blorp_vf_invalidate_for_vb_48b_transitions(batch, addrs, num_vbs);
356
357 const unsigned num_dwords = 1 + num_vbs * GENX(VERTEX_BUFFER_STATE_length);
358 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_BUFFERS), num_dwords);
359 if (!dw)
360 return;
361
362 for (unsigned i = 0; i < num_vbs; i++) {
363 GENX(VERTEX_BUFFER_STATE_pack)(batch, dw, &vb[i]);
364 dw += GENX(VERTEX_BUFFER_STATE_length);
365 }
366 }
367
368 static void
369 blorp_emit_vertex_elements(struct blorp_batch *batch,
370 const struct blorp_params *params)
371 {
372 const unsigned num_varyings =
373 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
374 bool need_ndc = batch->blorp->compiler->devinfo->gen <= 5;
375 const unsigned num_elements = 2 + need_ndc + num_varyings;
376
377 struct GENX(VERTEX_ELEMENT_STATE) ve[num_elements];
378 memset(ve, 0, num_elements * sizeof(*ve));
379
380 /* Set up the VBO for the rectangle primitive.
381 *
382 * A rectangle primitive (3DPRIM_RECTLIST) consists of only three
383 * vertices. The vertices reside in screen space with DirectX
384 * coordinates (that is, (0, 0) is the upper left corner).
385 *
386 * v2 ------ implied
387 * | |
388 * | |
389 * v1 ----- v0
390 *
391 * Since the VS is disabled, the clipper loads each VUE directly from
392 * the URB. This is controlled by the 3DSTATE_VERTEX_BUFFERS and
393 * 3DSTATE_VERTEX_ELEMENTS packets below. The VUE contents are as follows:
394 * dw0: Reserved, MBZ.
395 * dw1: Render Target Array Index. Below, the vertex fetcher is programmed
396 * to fill this with the primitive instance identifier, which is then
397 * used for layered clears. All other renders have only one instance,
398 * so the value is effectively zero.
399 * dw2: Viewport Index. The HiZ op disables viewport mapping and
400 * scissoring, so set the dword to 0.
401 * dw3: Point Width: The HiZ op does not emit the POINTLIST primitive,
402 * so set the dword to 0.
403 * dw4: Vertex Position X.
404 * dw5: Vertex Position Y.
405 * dw6: Vertex Position Z.
406 * dw7: Vertex Position W.
407 *
408 * dw8: Flat vertex input 0
409 * dw9: Flat vertex input 1
410 * ...
411 * dwn: Flat vertex input n - 8
412 *
413 * For details, see the Sandybridge PRM, Volume 2, Part 1, Section 1.5.1
414 * "Vertex URB Entry (VUE) Formats".
415 *
416 * Only vertex positions X and Y are variable; Z is fixed to zero and W to
417 * one. Header words dw0, dw2, and dw3 are zero, so there is no need to
418 * include the fixed values in the vertex buffer; the vertex fetcher can be
419 * instructed to fill those vertex elements with constant values of one and
420 * zero instead of reading them from the buffer.
421 * Flat inputs are program constants that are not interpolated; moreover,
422 * their values are the same for all vertices.
423 *
424 * See the vertex element setup below.
425 */
426 unsigned slot = 0;
427
428 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
429 .VertexBufferIndex = 1,
430 .Valid = true,
431 .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
432 .SourceElementOffset = 0,
433 .Component0Control = VFCOMP_STORE_SRC,
434
435 /* From Gen8 onwards, the hardware is no longer instructed to overwrite
436 * components using an element specifier. Instead, there is a separate
437 * 3DSTATE_VF_SGVS (System Generated Value Setup) state packet for it.
438 */
439 #if GEN_GEN >= 8
440 .Component1Control = VFCOMP_STORE_0,
441 #elif GEN_GEN >= 5
442 .Component1Control = VFCOMP_STORE_IID,
443 #else
444 .Component1Control = VFCOMP_STORE_0,
445 #endif
446 .Component2Control = VFCOMP_STORE_0,
447 .Component3Control = VFCOMP_STORE_0,
448 #if GEN_GEN <= 5
449 .DestinationElementOffset = slot * 4,
450 #endif
451 };
452 slot++;
453
454 #if GEN_GEN <= 5
455 /* On Iron Lake and earlier, a native device coordinates version of the
456 * position goes right after the normal VUE header and before position.
457 * Since w == 1 for all of our coordinates, this is just a copy of the
458 * position.
459 */
460 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
461 .VertexBufferIndex = 0,
462 .Valid = true,
463 .SourceElementFormat = ISL_FORMAT_R32G32B32_FLOAT,
464 .SourceElementOffset = 0,
465 .Component0Control = VFCOMP_STORE_SRC,
466 .Component1Control = VFCOMP_STORE_SRC,
467 .Component2Control = VFCOMP_STORE_SRC,
468 .Component3Control = VFCOMP_STORE_1_FP,
469 .DestinationElementOffset = slot * 4,
470 };
471 slot++;
472 #endif
473
474 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
475 .VertexBufferIndex = 0,
476 .Valid = true,
477 .SourceElementFormat = ISL_FORMAT_R32G32B32_FLOAT,
478 .SourceElementOffset = 0,
479 .Component0Control = VFCOMP_STORE_SRC,
480 .Component1Control = VFCOMP_STORE_SRC,
481 .Component2Control = VFCOMP_STORE_SRC,
482 .Component3Control = VFCOMP_STORE_1_FP,
483 #if GEN_GEN <= 5
484 .DestinationElementOffset = slot * 4,
485 #endif
486 };
487 slot++;
488
489 for (unsigned i = 0; i < num_varyings; ++i) {
490 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
491 .VertexBufferIndex = 1,
492 .Valid = true,
493 .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
494 .SourceElementOffset = 16 + i * 4 * sizeof(float),
495 .Component0Control = VFCOMP_STORE_SRC,
496 .Component1Control = VFCOMP_STORE_SRC,
497 .Component2Control = VFCOMP_STORE_SRC,
498 .Component3Control = VFCOMP_STORE_SRC,
499 #if GEN_GEN <= 5
500 .DestinationElementOffset = slot * 4,
501 #endif
502 };
503 slot++;
504 }
505
506 const unsigned num_dwords =
507 1 + GENX(VERTEX_ELEMENT_STATE_length) * num_elements;
508 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_ELEMENTS), num_dwords);
509 if (!dw)
510 return;
511
512 for (unsigned i = 0; i < num_elements; i++) {
513 GENX(VERTEX_ELEMENT_STATE_pack)(batch, dw, &ve[i]);
514 dw += GENX(VERTEX_ELEMENT_STATE_length);
515 }
516
517 blorp_emit(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
518 vf.StatisticsEnable = false;
519 }
520
521 #if GEN_GEN >= 8
522 /* Overwrite Render Target Array Index (2nd dword) in the VUE header with
523 * primitive instance identifier. This is used for layered clears.
524 */
525 blorp_emit(batch, GENX(3DSTATE_VF_SGVS), sgvs) {
526 sgvs.InstanceIDEnable = true;
527 sgvs.InstanceIDComponentNumber = COMP_1;
528 sgvs.InstanceIDElementOffset = 0;
529 }
530
531 for (unsigned i = 0; i < num_elements; i++) {
532 blorp_emit(batch, GENX(3DSTATE_VF_INSTANCING), vf) {
533 vf.VertexElementIndex = i;
534 vf.InstancingEnable = false;
535 }
536 }
537
538 blorp_emit(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
539 topo.PrimitiveTopologyType = _3DPRIM_RECTLIST;
540 }
541 #endif
542 }
543
544 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
545 static uint32_t
546 blorp_emit_cc_viewport(struct blorp_batch *batch)
547 {
548 uint32_t cc_vp_offset;
549 blorp_emit_dynamic(batch, GENX(CC_VIEWPORT), vp, 32, &cc_vp_offset) {
550 vp.MinimumDepth = 0.0;
551 vp.MaximumDepth = 1.0;
552 }
553
554 #if GEN_GEN >= 7
555 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), vsp) {
556 vsp.CCViewportPointer = cc_vp_offset;
557 }
558 #elif GEN_GEN == 6
559 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vsp) {
560 vsp.CCViewportStateChange = true;
561 vsp.PointertoCC_VIEWPORT = cc_vp_offset;
562 }
563 #endif
564
565 return cc_vp_offset;
566 }
567
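/* Emit the single SAMPLER_STATE blorp uses for blits: linear min/mag
 * filtering, clamp addressing on all three axes and, on Gen7+,
 * non-normalized coordinates.  The PS sampler state pointer is updated to
 * point at it and its dynamic state offset is returned.
 */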
568 static uint32_t
569 blorp_emit_sampler_state(struct blorp_batch *batch)
570 {
571 uint32_t offset;
572 blorp_emit_dynamic(batch, GENX(SAMPLER_STATE), sampler, 32, &offset) {
573 sampler.MipModeFilter = MIPFILTER_NONE;
574 sampler.MagModeFilter = MAPFILTER_LINEAR;
575 sampler.MinModeFilter = MAPFILTER_LINEAR;
576 sampler.MinLOD = 0;
577 sampler.MaxLOD = 0;
578 sampler.TCXAddressControlMode = TCM_CLAMP;
579 sampler.TCYAddressControlMode = TCM_CLAMP;
580 sampler.TCZAddressControlMode = TCM_CLAMP;
581 sampler.MaximumAnisotropy = RATIO21;
582 sampler.RAddressMinFilterRoundingEnable = true;
583 sampler.RAddressMagFilterRoundingEnable = true;
584 sampler.VAddressMinFilterRoundingEnable = true;
585 sampler.VAddressMagFilterRoundingEnable = true;
586 sampler.UAddressMinFilterRoundingEnable = true;
587 sampler.UAddressMagFilterRoundingEnable = true;
588 #if GEN_GEN > 6
589 sampler.NonnormalizedCoordinateEnable = true;
590 #endif
591 }
592
593 #if GEN_GEN >= 7
594 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_PS), ssp) {
595 ssp.PointertoPSSamplerState = offset;
596 }
597 #elif GEN_GEN == 6
598 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS), ssp) {
599 ssp.VSSamplerStateChange = true;
600 ssp.GSSamplerStateChange = true;
601 ssp.PSSamplerStateChange = true;
602 ssp.PointertoPSSamplerState = offset;
603 }
604 #endif
605
606 return offset;
607 }
608
609 /* What follows is the code for setting up a "pipeline" on Sandy Bridge and
610 * later hardware. This file will be included by i965 for gen4-5 as well, so
611 * this code is guarded by GEN_GEN >= 6.
612 */
613 #if GEN_GEN >= 6
614
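/* Emit 3DSTATE_VS.  Most blorp operations run with the VS disabled; when a
 * vertex shader is provided (vs_prog_data != NULL), it is enabled and, on
 * Gen8+, dispatched in SIMD8 mode if it was compiled that way.
 */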
615 static void
616 blorp_emit_vs_config(struct blorp_batch *batch,
617 const struct blorp_params *params)
618 {
619 struct brw_vs_prog_data *vs_prog_data = params->vs_prog_data;
620 assert(!vs_prog_data || GEN_GEN < 11 ||
621 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8);
622
623 blorp_emit(batch, GENX(3DSTATE_VS), vs) {
624 if (vs_prog_data) {
625 vs.Enable = true;
626
627 vs.KernelStartPointer = params->vs_prog_kernel;
628
629 vs.DispatchGRFStartRegisterForURBData =
630 vs_prog_data->base.base.dispatch_grf_start_reg;
631 vs.VertexURBEntryReadLength =
632 vs_prog_data->base.urb_read_length;
633 vs.VertexURBEntryReadOffset = 0;
634
635 vs.MaximumNumberofThreads =
636 batch->blorp->isl_dev->info->max_vs_threads - 1;
637
638 #if GEN_GEN >= 8
639 vs.SIMD8DispatchEnable =
640 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
641 #endif
642 }
643 }
644 }
645
646 static void
647 blorp_emit_sf_config(struct blorp_batch *batch,
648 const struct blorp_params *params)
649 {
650 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
651
652 /* 3DSTATE_SF
653 *
654 * Disable ViewportTransformEnable (dw2.1)
655 *
656 * From the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
657 * Primitives Overview":
658 * RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
659 * use of screen-space coordinates).
660 *
661 * A solid rectangle must be rendered, so set FrontFaceFillMode (dw2.4:3)
662 * and BackFaceFillMode (dw2.5:6) to SOLID(0).
663 *
664 * From the Sandy Bridge PRM, Volume 2, Part 1, Section
665 * 6.4.1.1 3DSTATE_SF, Field FrontFaceFillMode:
666 * SOLID: Any triangle or rectangle object found to be front-facing
667 * is rendered as a solid object. This setting is required when
668 * rendering rectangle (RECTLIST) objects.
669 */
670
671 #if GEN_GEN >= 8
672
673 blorp_emit(batch, GENX(3DSTATE_SF), sf);
674
675 blorp_emit(batch, GENX(3DSTATE_RASTER), raster) {
676 raster.CullMode = CULLMODE_NONE;
677 }
678
679 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
680 sbe.VertexURBEntryReadOffset = 1;
681 if (prog_data) {
682 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
683 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
684 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
685 } else {
686 sbe.NumberofSFOutputAttributes = 0;
687 sbe.VertexURBEntryReadLength = 1;
688 }
689 sbe.ForceVertexURBEntryReadLength = true;
690 sbe.ForceVertexURBEntryReadOffset = true;
691
692 #if GEN_GEN >= 9
693 for (unsigned i = 0; i < 32; i++)
694 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
695 #endif
696 }
697
698 #elif GEN_GEN >= 7
699
700 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
701 sf.FrontFaceFillMode = FILL_MODE_SOLID;
702 sf.BackFaceFillMode = FILL_MODE_SOLID;
703
704 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
705 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
706
707 #if GEN_GEN == 7
708 sf.DepthBufferSurfaceFormat = params->depth_format;
709 #endif
710 }
711
712 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
713 sbe.VertexURBEntryReadOffset = 1;
714 if (prog_data) {
715 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
716 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
717 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
718 } else {
719 sbe.NumberofSFOutputAttributes = 0;
720 sbe.VertexURBEntryReadLength = 1;
721 }
722 }
723
724 #else /* GEN_GEN <= 6 */
725
726 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
727 sf.FrontFaceFillMode = FILL_MODE_SOLID;
728 sf.BackFaceFillMode = FILL_MODE_SOLID;
729
730 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
731 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
732
733 sf.VertexURBEntryReadOffset = 1;
734 if (prog_data) {
735 sf.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
736 sf.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
737 sf.ConstantInterpolationEnable = prog_data->flat_inputs;
738 } else {
739 sf.NumberofSFOutputAttributes = 0;
740 sf.VertexURBEntryReadLength = 1;
741 }
742 }
743
744 #endif /* GEN_GEN */
745 }
746
747 static void
748 blorp_emit_ps_config(struct blorp_batch *batch,
749 const struct blorp_params *params)
750 {
751 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
752
753 /* Even when thread dispatch is disabled, max threads (dw5.25:31) must be
754 * nonzero to prevent the GPU from hanging. While the documentation doesn't
755 * mention this explicitly, it notes that the valid range for the field is
756 * [1,39] = [2,40] threads, which excludes zero.
757 *
758 * To be safe (and to minimize extraneous code) we go ahead and fully
759 * configure the WM state whether or not there is a WM program.
760 */
761
762 #if GEN_GEN >= 8
763
764 blorp_emit(batch, GENX(3DSTATE_WM), wm);
765
766 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
767 if (params->src.enabled) {
768 ps.SamplerCount = 1; /* Up to 4 samplers */
769 ps.BindingTableEntryCount = 2;
770 } else {
771 ps.BindingTableEntryCount = 1;
772 }
773
774 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests
775 * disabling prefetching of binding tables on A0 and B0 steppings.
776 * TODO: Revisit this WA on C0 stepping.
777 */
778 if (GEN_GEN == 11)
779 ps.BindingTableEntryCount = 0;
780
781 /* SAMPLER_STATE prefetching is broken on Gen11 - WA_1606682166 */
782 if (GEN_GEN == 11)
783 ps.SamplerCount = 0;
784
785 if (prog_data) {
786 ps._8PixelDispatchEnable = prog_data->dispatch_8;
787 ps._16PixelDispatchEnable = prog_data->dispatch_16;
788 ps._32PixelDispatchEnable = prog_data->dispatch_32;
789
790 /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
791 *
792 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
793 * Dispatch must not be enabled for PER_PIXEL dispatch mode."
794 *
795 * Since 16x MSAA is first introduced on SKL, we don't need to apply
796 * the workaround on any older hardware.
797 */
798 if (GEN_GEN >= 9 && !prog_data->persample_dispatch &&
799 params->num_samples == 16) {
800 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
801 ps._32PixelDispatchEnable = false;
802 }
803
804 ps.DispatchGRFStartRegisterForConstantSetupData0 =
805 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
806 ps.DispatchGRFStartRegisterForConstantSetupData1 =
807 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
808 ps.DispatchGRFStartRegisterForConstantSetupData2 =
809 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);
810
811 ps.KernelStartPointer0 = params->wm_prog_kernel +
812 brw_wm_prog_data_prog_offset(prog_data, ps, 0);
813 ps.KernelStartPointer1 = params->wm_prog_kernel +
814 brw_wm_prog_data_prog_offset(prog_data, ps, 1);
815 ps.KernelStartPointer2 = params->wm_prog_kernel +
816 brw_wm_prog_data_prog_offset(prog_data, ps, 2);
817 }
818
819 /* 3DSTATE_PS expects the number of threads per PSD, which is always 64
820 * for pre-Gen11 and 128 for Gen11+. On Gen11+, a programmed value of k
821 * implies 2*(k+1) threads. It implicitly scales for different GT
822 * levels (which each have some number of PSDs).
823 *
824 * In Gen8 the format is U8-2 whereas in Gen9+ it is U9-1.
825 */
826 if (GEN_GEN >= 9)
827 ps.MaximumNumberofThreadsPerPSD = 64 - 1;
828 else
829 ps.MaximumNumberofThreadsPerPSD = 64 - 2;
830
831 switch (params->fast_clear_op) {
832 case ISL_AUX_OP_NONE:
833 break;
834 #if GEN_GEN >= 10
835 case ISL_AUX_OP_AMBIGUATE:
836 ps.RenderTargetFastClearEnable = true;
837 ps.RenderTargetResolveType = FAST_CLEAR_0;
838 break;
839 #endif
840 #if GEN_GEN >= 9
841 case ISL_AUX_OP_PARTIAL_RESOLVE:
842 ps.RenderTargetResolveType = RESOLVE_PARTIAL;
843 break;
844 case ISL_AUX_OP_FULL_RESOLVE:
845 ps.RenderTargetResolveType = RESOLVE_FULL;
846 break;
847 #else
848 case ISL_AUX_OP_FULL_RESOLVE:
849 ps.RenderTargetResolveEnable = true;
850 break;
851 #endif
852 case ISL_AUX_OP_FAST_CLEAR:
853 ps.RenderTargetFastClearEnable = true;
854 break;
855 default:
856 unreachable("Invalid fast clear op");
857 }
858 }
859
860 blorp_emit(batch, GENX(3DSTATE_PS_EXTRA), psx) {
861 if (prog_data) {
862 psx.PixelShaderValid = true;
863 psx.AttributeEnable = prog_data->num_varying_inputs > 0;
864 psx.PixelShaderIsPerSample = prog_data->persample_dispatch;
865 }
866
867 if (params->src.enabled)
868 psx.PixelShaderKillsPixel = true;
869 }
870
871 #elif GEN_GEN >= 7
872
873 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
874 switch (params->hiz_op) {
875 case ISL_AUX_OP_FAST_CLEAR:
876 wm.DepthBufferClear = true;
877 break;
878 case ISL_AUX_OP_FULL_RESOLVE:
879 wm.DepthBufferResolveEnable = true;
880 break;
881 case ISL_AUX_OP_AMBIGUATE:
882 wm.HierarchicalDepthBufferResolveEnable = true;
883 break;
884 case ISL_AUX_OP_NONE:
885 break;
886 default:
887 unreachable("not reached");
888 }
889
890 if (prog_data)
891 wm.ThreadDispatchEnable = true;
892
893 if (params->src.enabled)
894 wm.PixelShaderKillsPixel = true;
895
896 if (params->num_samples > 1) {
897 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
898 wm.MultisampleDispatchMode =
899 (prog_data && prog_data->persample_dispatch) ?
900 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
901 } else {
902 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
903 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
904 }
905 }
906
907 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
908 ps.MaximumNumberofThreads =
909 batch->blorp->isl_dev->info->max_wm_threads - 1;
910
911 #if GEN_IS_HASWELL
912 ps.SampleMask = 1;
913 #endif
914
915 if (prog_data) {
916 ps._8PixelDispatchEnable = prog_data->dispatch_8;
917 ps._16PixelDispatchEnable = prog_data->dispatch_16;
918 ps._32PixelDispatchEnable = prog_data->dispatch_32;
919
920 ps.DispatchGRFStartRegisterForConstantSetupData0 =
921 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
922 ps.DispatchGRFStartRegisterForConstantSetupData1 =
923 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
924 ps.DispatchGRFStartRegisterForConstantSetupData2 =
925 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);
926
927 ps.KernelStartPointer0 = params->wm_prog_kernel +
928 brw_wm_prog_data_prog_offset(prog_data, ps, 0);
929 ps.KernelStartPointer1 = params->wm_prog_kernel +
930 brw_wm_prog_data_prog_offset(prog_data, ps, 1);
931 ps.KernelStartPointer2 = params->wm_prog_kernel +
932 brw_wm_prog_data_prog_offset(prog_data, ps, 2);
933
934 ps.AttributeEnable = prog_data->num_varying_inputs > 0;
935 } else {
936 /* Gen7 hardware gets angry if we don't enable at least one dispatch
937 * mode, so just enable 16-pixel dispatch if we don't have a program.
938 */
939 ps._16PixelDispatchEnable = true;
940 }
941
942 if (params->src.enabled)
943 ps.SamplerCount = 1; /* Up to 4 samplers */
944
945 switch (params->fast_clear_op) {
946 case ISL_AUX_OP_NONE:
947 break;
948 case ISL_AUX_OP_FULL_RESOLVE:
949 ps.RenderTargetResolveEnable = true;
950 break;
951 case ISL_AUX_OP_FAST_CLEAR:
952 ps.RenderTargetFastClearEnable = true;
953 break;
954 default:
955 unreachable("Invalid fast clear op");
956 }
957 }
958
959 #else /* GEN_GEN <= 6 */
960
961 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
962 wm.MaximumNumberofThreads =
963 batch->blorp->isl_dev->info->max_wm_threads - 1;
964
965 switch (params->hiz_op) {
966 case ISL_AUX_OP_FAST_CLEAR:
967 wm.DepthBufferClear = true;
968 break;
969 case ISL_AUX_OP_FULL_RESOLVE:
970 wm.DepthBufferResolveEnable = true;
971 break;
972 case ISL_AUX_OP_AMBIGUATE:
973 wm.HierarchicalDepthBufferResolveEnable = true;
974 break;
975 case ISL_AUX_OP_NONE:
976 break;
977 default:
978 unreachable("not reached");
979 }
980
981 if (prog_data) {
982 wm.ThreadDispatchEnable = true;
983
984 wm._8PixelDispatchEnable = prog_data->dispatch_8;
985 wm._16PixelDispatchEnable = prog_data->dispatch_16;
986 wm._32PixelDispatchEnable = prog_data->dispatch_32;
987
988 wm.DispatchGRFStartRegisterForConstantSetupData0 =
989 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 0);
990 wm.DispatchGRFStartRegisterForConstantSetupData1 =
991 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 1);
992 wm.DispatchGRFStartRegisterForConstantSetupData2 =
993 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 2);
994
995 wm.KernelStartPointer0 = params->wm_prog_kernel +
996 brw_wm_prog_data_prog_offset(prog_data, wm, 0);
997 wm.KernelStartPointer1 = params->wm_prog_kernel +
998 brw_wm_prog_data_prog_offset(prog_data, wm, 1);
999 wm.KernelStartPointer2 = params->wm_prog_kernel +
1000 brw_wm_prog_data_prog_offset(prog_data, wm, 2);
1001
1002 wm.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
1003 }
1004
1005 if (params->src.enabled) {
1006 wm.SamplerCount = 1; /* Up to 4 samplers */
1007 wm.PixelShaderKillsPixel = true; /* TODO: temporarily smash on */
1008 }
1009
1010 if (params->num_samples > 1) {
1011 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
1012 wm.MultisampleDispatchMode =
1013 (prog_data && prog_data->persample_dispatch) ?
1014 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
1015 } else {
1016 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
1017 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1018 }
1019 }
1020
1021 #endif /* GEN_GEN */
1022 }
1023
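/* Emit BLEND_STATE with one entry per draw buffer.  Blending itself is left
 * disabled; only color clamping and the per-channel write-disable flags from
 * params are programmed.  Returns the dynamic state offset.
 */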
1024 static uint32_t
1025 blorp_emit_blend_state(struct blorp_batch *batch,
1026 const struct blorp_params *params)
1027 {
1028 struct GENX(BLEND_STATE) blend;
1029 memset(&blend, 0, sizeof(blend));
1030
1031 uint32_t offset;
1032 int size = GENX(BLEND_STATE_length) * 4;
1033 size += GENX(BLEND_STATE_ENTRY_length) * 4 * params->num_draw_buffers;
1034 uint32_t *state = blorp_alloc_dynamic_state(batch, size, 64, &offset);
1035 uint32_t *pos = state;
1036
1037 GENX(BLEND_STATE_pack)(NULL, pos, &blend);
1038 pos += GENX(BLEND_STATE_length);
1039
1040 for (unsigned i = 0; i < params->num_draw_buffers; ++i) {
1041 struct GENX(BLEND_STATE_ENTRY) entry = {
1042 .PreBlendColorClampEnable = true,
1043 .PostBlendColorClampEnable = true,
1044 .ColorClampRange = COLORCLAMP_RTFORMAT,
1045
1046 .WriteDisableRed = params->color_write_disable[0],
1047 .WriteDisableGreen = params->color_write_disable[1],
1048 .WriteDisableBlue = params->color_write_disable[2],
1049 .WriteDisableAlpha = params->color_write_disable[3],
1050 };
1051 GENX(BLEND_STATE_ENTRY_pack)(NULL, pos, &entry);
1052 pos += GENX(BLEND_STATE_ENTRY_length);
1053 }
1054
1055 blorp_flush_range(batch, state, size);
1056
1057 #if GEN_GEN >= 7
1058 blorp_emit(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), sp) {
1059 sp.BlendStatePointer = offset;
1060 #if GEN_GEN >= 8
1061 sp.BlendStatePointerValid = true;
1062 #endif
1063 }
1064 #endif
1065
1066 #if GEN_GEN >= 8
1067 blorp_emit(batch, GENX(3DSTATE_PS_BLEND), ps_blend) {
1068 ps_blend.HasWriteableRT = true;
1069 }
1070 #endif
1071
1072 return offset;
1073 }
1074
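/* Emit COLOR_CALC_STATE.  The only field blorp needs is the stencil
 * reference value, and only on Gen8 and earlier; on Gen9+ the reference
 * value lives in 3DSTATE_WM_DEPTH_STENCIL instead.
 */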
1075 static uint32_t
1076 blorp_emit_color_calc_state(struct blorp_batch *batch,
1077 UNUSED const struct blorp_params *params)
1078 {
1079 uint32_t offset;
1080 blorp_emit_dynamic(batch, GENX(COLOR_CALC_STATE), cc, 64, &offset) {
1081 #if GEN_GEN <= 8
1082 cc.StencilReferenceValue = params->stencil_ref;
1083 #endif
1084 }
1085
1086 #if GEN_GEN >= 7
1087 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), sp) {
1088 sp.ColorCalcStatePointer = offset;
1089 #if GEN_GEN >= 8
1090 sp.ColorCalcStatePointerValid = true;
1091 #endif
1092 }
1093 #endif
1094
1095 return offset;
1096 }
1097
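/* Emit the depth/stencil state: as an inline 3DSTATE_WM_DEPTH_STENCIL packet
 * on Gen8+, or as a DEPTH_STENCIL_STATE dynamic state object on earlier
 * generations (pointed to via 3DSTATE_DEPTH_STENCIL_STATE_POINTERS on Gen7
 * and via 3DSTATE_CC_STATE_POINTERS from blorp_emit_pipeline on Gen6).
 */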
1098 static uint32_t
1099 blorp_emit_depth_stencil_state(struct blorp_batch *batch,
1100 const struct blorp_params *params)
1101 {
1102 #if GEN_GEN >= 8
1103 struct GENX(3DSTATE_WM_DEPTH_STENCIL) ds = {
1104 GENX(3DSTATE_WM_DEPTH_STENCIL_header),
1105 };
1106 #else
1107 struct GENX(DEPTH_STENCIL_STATE) ds = { 0 };
1108 #endif
1109
1110 if (params->depth.enabled) {
1111 ds.DepthBufferWriteEnable = true;
1112
1113 switch (params->hiz_op) {
1114 /* See the following sections of the Sandy Bridge PRM, Volume 2, Part1:
1115 * - 7.5.3.1 Depth Buffer Clear
1116 * - 7.5.3.2 Depth Buffer Resolve
1117 * - 7.5.3.3 Hierarchical Depth Buffer Resolve
1118 */
1119 case ISL_AUX_OP_FULL_RESOLVE:
1120 ds.DepthTestEnable = true;
1121 ds.DepthTestFunction = COMPAREFUNCTION_NEVER;
1122 break;
1123
1124 case ISL_AUX_OP_NONE:
1125 case ISL_AUX_OP_FAST_CLEAR:
1126 case ISL_AUX_OP_AMBIGUATE:
1127 ds.DepthTestEnable = false;
1128 break;
1129 case ISL_AUX_OP_PARTIAL_RESOLVE:
1130 unreachable("Invalid HIZ op");
1131 }
1132 }
1133
1134 if (params->stencil.enabled) {
1135 ds.StencilBufferWriteEnable = true;
1136 ds.StencilTestEnable = true;
1137 ds.DoubleSidedStencilEnable = false;
1138
1139 ds.StencilTestFunction = COMPAREFUNCTION_ALWAYS;
1140 ds.StencilPassDepthPassOp = STENCILOP_REPLACE;
1141
1142 ds.StencilWriteMask = params->stencil_mask;
1143 #if GEN_GEN >= 9
1144 ds.StencilReferenceValue = params->stencil_ref;
1145 #endif
1146 }
1147
1148 #if GEN_GEN >= 8
1149 uint32_t offset = 0;
1150 uint32_t *dw = blorp_emit_dwords(batch,
1151 GENX(3DSTATE_WM_DEPTH_STENCIL_length));
1152 if (!dw)
1153 return 0;
1154
1155 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &ds);
1156 #else
1157 uint32_t offset;
1158 void *state = blorp_alloc_dynamic_state(batch,
1159 GENX(DEPTH_STENCIL_STATE_length) * 4,
1160 64, &offset);
1161 GENX(DEPTH_STENCIL_STATE_pack)(NULL, state, &ds);
1162 blorp_flush_range(batch, state, GENX(DEPTH_STENCIL_STATE_length) * 4);
1163 #endif
1164
1165 #if GEN_GEN == 7
1166 blorp_emit(batch, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), sp) {
1167 sp.PointertoDEPTH_STENCIL_STATE = offset;
1168 }
1169 #endif
1170
1171 return offset;
1172 }
1173
1174 static void
1175 blorp_emit_3dstate_multisample(struct blorp_batch *batch,
1176 const struct blorp_params *params)
1177 {
1178 blorp_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
1179 ms.NumberofMultisamples = __builtin_ffs(params->num_samples) - 1;
1180
1181 #if GEN_GEN >= 8
1182 /* The PRM says that this bit is valid only for DX9:
1183 *
1184 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
1185 * should not have any effect by setting or not setting this bit.
1186 */
1187 ms.PixelPositionOffsetEnable = false;
1188 #elif GEN_GEN >= 7
1189
1190 switch (params->num_samples) {
1191 case 1:
1192 GEN_SAMPLE_POS_1X(ms.Sample);
1193 break;
1194 case 2:
1195 GEN_SAMPLE_POS_2X(ms.Sample);
1196 break;
1197 case 4:
1198 GEN_SAMPLE_POS_4X(ms.Sample);
1199 break;
1200 case 8:
1201 GEN_SAMPLE_POS_8X(ms.Sample);
1202 break;
1203 default:
1204 break;
1205 }
1206 #else
1207 GEN_SAMPLE_POS_4X(ms.Sample);
1208 #endif
1209 ms.PixelLocation = CENTER;
1210 }
1211 }
1212
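/* Emit the full 3D pipeline state for a blorp rectangle draw: URB
 * configuration, blend/color-calc/depth-stencil state, empty push constants,
 * sampler and multisample state, every shader stage (all disabled except the
 * optional VS and PS), clip/SF/PS setup and the CC viewport.
 */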
1213 static void
1214 blorp_emit_pipeline(struct blorp_batch *batch,
1215 const struct blorp_params *params)
1216 {
1217 uint32_t blend_state_offset = 0;
1218 uint32_t color_calc_state_offset;
1219 uint32_t depth_stencil_state_offset;
1220
1221 emit_urb_config(batch, params);
1222
1223 if (params->wm_prog_data) {
1224 blend_state_offset = blorp_emit_blend_state(batch, params);
1225 }
1226 color_calc_state_offset = blorp_emit_color_calc_state(batch, params);
1227 depth_stencil_state_offset = blorp_emit_depth_stencil_state(batch, params);
1228
1229 #if GEN_GEN == 6
1230 /* 3DSTATE_CC_STATE_POINTERS
1231 *
1232 * The pointer offsets are relative to
1233 * CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
1234 *
1235 * The HiZ op doesn't use BLEND_STATE or COLOR_CALC_STATE.
1236 *
1237 * The dynamic state emit helpers emit their own STATE_POINTERS packets on
1238 * gen7+. However, on gen6 and earlier, they're all lumped together in
1239 * one CC_STATE_POINTERS packet so we have to emit that here.
1240 */
1241 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), cc) {
1242 cc.BLEND_STATEChange = true;
1243 cc.ColorCalcStatePointerValid = true;
1244 cc.DEPTH_STENCIL_STATEChange = true;
1245 cc.PointertoBLEND_STATE = blend_state_offset;
1246 cc.ColorCalcStatePointer = color_calc_state_offset;
1247 cc.PointertoDEPTH_STENCIL_STATE = depth_stencil_state_offset;
1248 }
1249 #else
1250 (void)blend_state_offset;
1251 (void)color_calc_state_offset;
1252 (void)depth_stencil_state_offset;
1253 #endif
1254
1255 #if GEN_GEN >= 12
1256 blorp_emit(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
1257 /* Update empty push constants for all stages (bitmask = 11111b) */
1258 pc.ShaderUpdateEnable = 0x1f;
1259 }
1260 #else
1261 blorp_emit(batch, GENX(3DSTATE_CONSTANT_VS), vs);
1262 #if GEN_GEN >= 7
1263 blorp_emit(batch, GENX(3DSTATE_CONSTANT_HS), hs);
1264 blorp_emit(batch, GENX(3DSTATE_CONSTANT_DS), DS);
1265 #endif
1266 blorp_emit(batch, GENX(3DSTATE_CONSTANT_GS), gs);
1267 blorp_emit(batch, GENX(3DSTATE_CONSTANT_PS), ps);
1268 #endif
1269
1270 if (params->src.enabled)
1271 blorp_emit_sampler_state(batch);
1272
1273 blorp_emit_3dstate_multisample(batch, params);
1274
1275 blorp_emit(batch, GENX(3DSTATE_SAMPLE_MASK), mask) {
1276 mask.SampleMask = (1 << params->num_samples) - 1;
1277 }
1278
1279 /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
1280 * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
1281 *
1282 * [DevSNB] A pipeline flush must be programmed prior to a
1283 * 3DSTATE_VS command that causes the VS Function Enable to
1284 * toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
1285 * command with CS stall bit set and a post sync operation.
1286 *
1287 * We've already done one at the start of the BLORP operation.
1288 */
1289 blorp_emit_vs_config(batch, params);
1290 #if GEN_GEN >= 7
1291 blorp_emit(batch, GENX(3DSTATE_HS), hs);
1292 blorp_emit(batch, GENX(3DSTATE_TE), te);
1293 blorp_emit(batch, GENX(3DSTATE_DS), DS);
1294 blorp_emit(batch, GENX(3DSTATE_STREAMOUT), so);
1295 #endif
1296 blorp_emit(batch, GENX(3DSTATE_GS), gs);
1297
1298 blorp_emit(batch, GENX(3DSTATE_CLIP), clip) {
1299 clip.PerspectiveDivideDisable = true;
1300 }
1301
1302 blorp_emit_sf_config(batch, params);
1303 blorp_emit_ps_config(batch, params);
1304
1305 blorp_emit_cc_viewport(batch);
1306 }
1307
1308 /******** This is the end of the pipeline setup code ********/
1309
1310 #endif /* GEN_GEN >= 6 */
1311
1312 #if GEN_GEN >= 7
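/* Copy `size` bytes (which must be a multiple of 4) from src to dst using
 * the command streamer: MI_COPY_MEM_MEM on Gen8+, or a load/store round trip
 * through a temporary register on Gen7.
 */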
1313 static void
1314 blorp_emit_memcpy(struct blorp_batch *batch,
1315 struct blorp_address dst,
1316 struct blorp_address src,
1317 uint32_t size)
1318 {
1319 assert(size % 4 == 0);
1320
1321 for (unsigned dw = 0; dw < size; dw += 4) {
1322 #if GEN_GEN >= 8
1323 blorp_emit(batch, GENX(MI_COPY_MEM_MEM), cp) {
1324 cp.DestinationMemoryAddress = dst;
1325 cp.SourceMemoryAddress = src;
1326 }
1327 #else
1328 /* IVB does not have a general purpose register for command streamer
1329 * commands. Therefore, we use an alternate temporary register.
1330 */
1331 #define BLORP_TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
1332 blorp_emit(batch, GENX(MI_LOAD_REGISTER_MEM), load) {
1333 load.RegisterAddress = BLORP_TEMP_REG;
1334 load.MemoryAddress = src;
1335 }
1336 blorp_emit(batch, GENX(MI_STORE_REGISTER_MEM), store) {
1337 store.RegisterAddress = BLORP_TEMP_REG;
1338 store.MemoryAddress = dst;
1339 }
1340 #undef BLORP_TEMP_REG
1341 #endif
1342 dst.offset += 4;
1343 src.offset += 4;
1344 }
1345 }
1346 #endif
1347
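/* Fill out a RENDER_SURFACE_STATE for one blorp surface via
 * isl_surf_fill_state() and emit relocations for the surface and auxiliary
 * surface addresses.  An indirect clear color is either referenced directly
 * from the surface state (Gen10+) or copied into it with the command
 * streamer (Gen7-9).
 */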
1348 static void
1349 blorp_emit_surface_state(struct blorp_batch *batch,
1350 const struct brw_blorp_surface_info *surface,
1351 enum isl_aux_op aux_op,
1352 void *state, uint32_t state_offset,
1353 const bool color_write_disables[4],
1354 bool is_render_target)
1355 {
1356 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1357 struct isl_surf surf = surface->surf;
1358
1359 if (surf.dim == ISL_SURF_DIM_1D &&
1360 surf.dim_layout == ISL_DIM_LAYOUT_GEN4_2D) {
1361 assert(surf.logical_level0_px.height == 1);
1362 surf.dim = ISL_SURF_DIM_2D;
1363 }
1364
1365 /* Blorp doesn't support HiZ in any of the blit or slow-clear paths */
1366 assert(!isl_aux_usage_has_hiz(surface->aux_usage));
1367 enum isl_aux_usage aux_usage = surface->aux_usage;
1368
1369 isl_channel_mask_t write_disable_mask = 0;
1370 if (is_render_target && GEN_GEN <= 5) {
1371 if (color_write_disables[0])
1372 write_disable_mask |= ISL_CHANNEL_RED_BIT;
1373 if (color_write_disables[1])
1374 write_disable_mask |= ISL_CHANNEL_GREEN_BIT;
1375 if (color_write_disables[2])
1376 write_disable_mask |= ISL_CHANNEL_BLUE_BIT;
1377 if (color_write_disables[3])
1378 write_disable_mask |= ISL_CHANNEL_ALPHA_BIT;
1379 }
1380
1381 const bool use_clear_address =
1382 GEN_GEN >= 10 && (surface->clear_color_addr.buffer != NULL);
1383
1384 isl_surf_fill_state(batch->blorp->isl_dev, state,
1385 .surf = &surf, .view = &surface->view,
1386 .aux_surf = &surface->aux_surf, .aux_usage = aux_usage,
1387 .address =
1388 blorp_get_surface_address(batch, surface->addr),
1389 .aux_address = aux_usage == ISL_AUX_USAGE_NONE ? 0 :
1390 blorp_get_surface_address(batch, surface->aux_addr),
1391 .clear_address = !use_clear_address ? 0 :
1392 blorp_get_surface_address(batch,
1393 surface->clear_color_addr),
1394 .mocs = surface->addr.mocs,
1395 .clear_color = surface->clear_color,
1396 .use_clear_address = use_clear_address,
1397 .write_disables = write_disable_mask);
1398
1399 blorp_surface_reloc(batch, state_offset + isl_dev->ss.addr_offset,
1400 surface->addr, 0);
1401
1402 if (aux_usage != ISL_AUX_USAGE_NONE) {
1403 /* On gen7 and prior, the bottom 12 bits of the MCS base address are
1404 * used to store other information. This should be ok, however, because
1405 * surface buffer addresses are always 4K page aligned.
1406 */
1407 assert((surface->aux_addr.offset & 0xfff) == 0);
1408 uint32_t *aux_addr = state + isl_dev->ss.aux_addr_offset;
1409 blorp_surface_reloc(batch, state_offset + isl_dev->ss.aux_addr_offset,
1410 surface->aux_addr, *aux_addr);
1411 }
1412
1413 if (aux_usage != ISL_AUX_USAGE_NONE && surface->clear_color_addr.buffer) {
1414 #if GEN_GEN >= 10
1415 assert((surface->clear_color_addr.offset & 0x3f) == 0);
1416 uint32_t *clear_addr = state + isl_dev->ss.clear_color_state_offset;
1417 blorp_surface_reloc(batch, state_offset +
1418 isl_dev->ss.clear_color_state_offset,
1419 surface->clear_color_addr, *clear_addr);
1420 #elif GEN_GEN >= 7
1421 /* Fast clears just whack the AUX surface and don't actually use the
1422 * clear color for anything, so we can avoid the MI memcpy in that case.
1423 */
1424 if (aux_op != ISL_AUX_OP_FAST_CLEAR) {
1425 struct blorp_address dst_addr = blorp_get_surface_base_address(batch);
1426 dst_addr.offset += state_offset + isl_dev->ss.clear_value_offset;
1427 blorp_emit_memcpy(batch, dst_addr, surface->clear_color_addr,
1428 isl_dev->ss.clear_value_size);
1429 }
1430 #else
1431 unreachable("Fast clears are only supported on gen7+");
1432 #endif
1433 }
1434
1435 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1436 }
1437
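/* Emit a SURFTYPE_NULL RENDER_SURFACE_STATE matching the dimensions of the
 * given surface.  This is used as the render target binding when only depth
 * or stencil is being written.
 */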
1438 static void
1439 blorp_emit_null_surface_state(struct blorp_batch *batch,
1440 const struct brw_blorp_surface_info *surface,
1441 uint32_t *state)
1442 {
1443 struct GENX(RENDER_SURFACE_STATE) ss = {
1444 .SurfaceType = SURFTYPE_NULL,
1445 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
1446 .Width = surface->surf.logical_level0_px.width - 1,
1447 .Height = surface->surf.logical_level0_px.height - 1,
1448 .MIPCountLOD = surface->view.base_level,
1449 .MinimumArrayElement = surface->view.base_array_layer,
1450 .Depth = surface->view.array_len - 1,
1451 .RenderTargetViewExtent = surface->view.array_len - 1,
1452 #if GEN_GEN >= 6
1453 .NumberofMultisamples = ffs(surface->surf.samples) - 1,
1454 #endif
1455
1456 #if GEN_GEN >= 7
1457 .SurfaceArray = surface->surf.dim != ISL_SURF_DIM_3D,
1458 #endif
1459
1460 #if GEN_GEN >= 8
1461 .TileMode = YMAJOR,
1462 #else
1463 .TiledSurface = true,
1464 #endif
1465 };
1466
1467 GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &ss);
1468
1469 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1470 }
1471
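/* Set up the binding table (unless a pre-baked one was provided) with the
 * render-target surface state and, when sampling, the texture surface state,
 * then emit the binding table pointers for the pixel shader.
 */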
1472 static void
1473 blorp_emit_surface_states(struct blorp_batch *batch,
1474 const struct blorp_params *params)
1475 {
1476 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1477 uint32_t bind_offset = 0, surface_offsets[2];
1478 void *surface_maps[2];
1479
1480 UNUSED bool has_indirect_clear_color = false;
1481 if (params->use_pre_baked_binding_table) {
1482 bind_offset = params->pre_baked_binding_table_offset;
1483 } else {
1484 unsigned num_surfaces = 1 + params->src.enabled;
1485 blorp_alloc_binding_table(batch, num_surfaces,
1486 isl_dev->ss.size, isl_dev->ss.align,
1487 &bind_offset, surface_offsets, surface_maps);
1488
1489 if (params->dst.enabled) {
1490 blorp_emit_surface_state(batch, &params->dst,
1491 params->fast_clear_op,
1492 surface_maps[BLORP_RENDERBUFFER_BT_INDEX],
1493 surface_offsets[BLORP_RENDERBUFFER_BT_INDEX],
1494 params->color_write_disable, true);
1495 if (params->dst.clear_color_addr.buffer != NULL)
1496 has_indirect_clear_color = true;
1497 } else {
1498 assert(params->depth.enabled || params->stencil.enabled);
1499 const struct brw_blorp_surface_info *surface =
1500 params->depth.enabled ? &params->depth : &params->stencil;
1501 blorp_emit_null_surface_state(batch, surface,
1502 surface_maps[BLORP_RENDERBUFFER_BT_INDEX]);
1503 }
1504
1505 if (params->src.enabled) {
1506 blorp_emit_surface_state(batch, &params->src,
1507 params->fast_clear_op,
1508 surface_maps[BLORP_TEXTURE_BT_INDEX],
1509 surface_offsets[BLORP_TEXTURE_BT_INDEX],
1510 NULL, false);
1511 if (params->src.clear_color_addr.buffer != NULL)
1512 has_indirect_clear_color = true;
1513 }
1514 }
1515
1516 #if GEN_GEN >= 7
1517 if (has_indirect_clear_color) {
1518 /* Updating a surface state object may require that the state cache be
1519 * invalidated. From the SKL PRM, Shared Functions -> State -> State
1520 * Caching:
1521 *
1522 * Whenever the RENDER_SURFACE_STATE object in memory pointed to by
1523 * the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
1524 * modified [...], the L1 state cache must be invalidated to ensure
1525 * the new surface or sampler state is fetched from system memory.
1526 */
1527 blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
1528 pipe.StateCacheInvalidationEnable = true;
1529 #if GEN_GEN >= 12
1530 pipe.TileCacheFlushEnable = true;
1531 #endif
1532 }
1533 }
1534 #endif
1535
1536 #if GEN_GEN >= 7
1537 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), bt);
1538 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_HS), bt);
1539 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_DS), bt);
1540 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_GS), bt);
1541
1542 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_PS), bt) {
1543 bt.PointertoPSBindingTable = bind_offset;
1544 }
1545 #elif GEN_GEN >= 6
1546 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1547 bt.PSBindingTableChange = true;
1548 bt.PointertoPSBindingTable = bind_offset;
1549 }
1550 #else
1551 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1552 bt.PointertoPSBindingTable = bind_offset;
1553 }
1554 #endif
1555 }
1556
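/* Emit the depth/stencil/HiZ buffer packets via isl_emit_depth_stencil_hiz_s(),
 * applying the Gen6 miplevel offsetting for HiZ/stencil and the Gen12
 * post-sync PIPE_CONTROL workaround.
 */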
1557 static void
1558 blorp_emit_depth_stencil_config(struct blorp_batch *batch,
1559 const struct blorp_params *params)
1560 {
1561 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1562
1563 uint32_t *dw = blorp_emit_dwords(batch, isl_dev->ds.size / 4);
1564 if (dw == NULL)
1565 return;
1566
1567 struct isl_depth_stencil_hiz_emit_info info = { };
1568
1569 if (params->depth.enabled) {
1570 info.view = &params->depth.view;
1571 info.mocs = params->depth.addr.mocs;
1572 } else if (params->stencil.enabled) {
1573 info.view = &params->stencil.view;
1574 info.mocs = params->stencil.addr.mocs;
1575 }
1576
1577 if (params->depth.enabled) {
1578 info.depth_surf = &params->depth.surf;
1579
1580 info.depth_address =
1581 blorp_emit_reloc(batch, dw + isl_dev->ds.depth_offset / 4,
1582 params->depth.addr, 0);
1583
1584 info.hiz_usage = params->depth.aux_usage;
1585 if (isl_aux_usage_has_hiz(info.hiz_usage)) {
1586 info.hiz_surf = &params->depth.aux_surf;
1587
1588 struct blorp_address hiz_address = params->depth.aux_addr;
1589 #if GEN_GEN == 6
1590 /* Sandy Bridge hardware does not technically support mipmapped HiZ.
1591 * However, we have a special layout that allows us to make it work
1592 * anyway by manually offsetting to the specified miplevel.
1593 */
1594 assert(info.hiz_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1595 uint32_t offset_B;
1596 isl_surf_get_image_offset_B_tile_sa(info.hiz_surf,
1597 info.view->base_level, 0, 0,
1598 &offset_B, NULL, NULL);
1599 hiz_address.offset += offset_B;
1600 #endif
1601
1602 info.hiz_address =
1603 blorp_emit_reloc(batch, dw + isl_dev->ds.hiz_offset / 4,
1604 hiz_address, 0);
1605
1606 info.depth_clear_value = params->depth.clear_color.f32[0];
1607 }
1608 }
1609
1610 if (params->stencil.enabled) {
1611 info.stencil_surf = &params->stencil.surf;
1612
1613 info.stencil_aux_usage = params->stencil.aux_usage;
1614 struct blorp_address stencil_address = params->stencil.addr;
1615 #if GEN_GEN == 6
1616 /* Sandy Bridge hardware does not technically support mipmapped stencil.
1617 * However, we have a special layout that allows us to make it work
1618 * anyway by manually offsetting to the specified miplevel.
1619 */
1620 assert(info.stencil_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1621 uint32_t offset_B;
1622 isl_surf_get_image_offset_B_tile_sa(info.stencil_surf,
1623 info.view->base_level, 0, 0,
1624 &offset_B, NULL, NULL);
1625 stencil_address.offset += offset_B;
1626 #endif
1627
1628 info.stencil_address =
1629 blorp_emit_reloc(batch, dw + isl_dev->ds.stencil_offset / 4,
1630 stencil_address, 0);
1631 }
1632
1633 isl_emit_depth_stencil_hiz_s(isl_dev, dw, &info);
1634
1635 #if GEN_GEN >= 12
1636 /* GEN:BUG:1408224581
1637 *
1638 * Workaround (Gen12LP A-step only): an additional pipe control with a
1639 * post-sync "store dword" operation is required. (The w/a is to emit an
1640 * additional pipe control after the stencil state whenever the surface
1641 * state bits of this state change.)
1642 */
1643 blorp_emit(batch, GENX(PIPE_CONTROL), pc) {
1644 pc.PostSyncOperation = WriteImmediateData;
1645 pc.Address = blorp_get_workaround_page(batch);
1646 }
1647 #endif
1648 }
1649
1650 #if GEN_GEN >= 8
1651 /* Emits the Optimized HiZ sequence specified in the BDW+ PRMs. The
1652 * depth/stencil buffer extents are ignored to handle APIs which perform
1653 * clearing operations without such information.
1654 */
1655 static void
1656 blorp_emit_gen8_hiz_op(struct blorp_batch *batch,
1657 const struct blorp_params *params)
1658 {
1659 /* We should be performing an operation on a depth or stencil buffer.
1660 */
1661 assert(params->depth.enabled || params->stencil.enabled);
1662
1663 /* The stencil buffer should only be enabled if a fast clear or full
1664 * resolve operation is requested on Gen12, or if a fast clear operation
1665 * is requested on earlier Gens.
1666 */
1667 if (params->stencil.enabled) {
1668 #if GEN_GEN >= 12
1669 assert(params->hiz_op == ISL_AUX_OP_FAST_CLEAR ||
1670 params->hiz_op == ISL_AUX_OP_FULL_RESOLVE);
1671 #else
1672 assert(params->hiz_op == ISL_AUX_OP_FAST_CLEAR);
1673 #endif
1674 }
1675
1676 /* From the BDW PRM Volume 2, 3DSTATE_WM_HZ_OP:
1677 *
1678 * 3DSTATE_MULTISAMPLE packet must be used prior to this packet to change
1679 * the Number of Multisamples. This packet must not be used to change
1680 * Number of Multisamples in a rendering sequence.
1681 *
1682 * Since HIZ may be the first thing in a batch buffer, play it safe and
1683 * always emit 3DSTATE_MULTISAMPLE.
1684 */
1685 blorp_emit_3dstate_multisample(batch, params);
1686
1687 /* From the BDW PRM Volume 7, Depth Buffer Clear:
1688 *
1689 * The clear value must be between the min and max depth values
1690 * (inclusive) defined in the CC_VIEWPORT. If the depth buffer format is
1691 * D32_FLOAT, then +/-DENORM values are also allowed.
1692 *
1693 * Set the bounds to match our hardware limits, [0.0, 1.0].
1694 */
1695 if (params->depth.enabled && params->hiz_op == ISL_AUX_OP_FAST_CLEAR) {
1696 assert(params->depth.clear_color.f32[0] >= 0.0f);
1697 assert(params->depth.clear_color.f32[0] <= 1.0f);
1698 blorp_emit_cc_viewport(batch);
1699 }
1700
1701 /* According to the SKL PRM formula for WM_INT::ThreadDispatchEnable, the
1702 * 3DSTATE_WM::ForceThreadDispatchEnable field can force WM thread dispatch
1703 * even when WM_HZ_OP is active. However, WM thread dispatch is normally
1704 * disabled for HiZ ops and it appears that force-enabling it can lead to
1705 * GPU hangs on at least Skylake. Since we don't know the current state of
1706 * the 3DSTATE_WM packet, just emit a dummy one prior to 3DSTATE_WM_HZ_OP.
1707 */
1708 blorp_emit(batch, GENX(3DSTATE_WM), wm);
1709
1710 /* If we can't alter the depth stencil config and multiple layers are
1711 * involved, the HiZ op will fail. This is because the op requires that a
1712 * new config is emitted for each additional layer.
1713 */
1714 if (batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL) {
1715 assert(params->num_layers <= 1);
1716 } else {
1717 blorp_emit_depth_stencil_config(batch, params);
1718 }
1719
   blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp) {
      switch (params->hiz_op) {
      case ISL_AUX_OP_FAST_CLEAR:
         hzp.StencilBufferClearEnable = params->stencil.enabled;
         hzp.DepthBufferClearEnable = params->depth.enabled;
         hzp.StencilClearValue = params->stencil_ref;
         hzp.FullSurfaceDepthandStencilClear = params->full_surface_hiz_op;
         break;
      case ISL_AUX_OP_FULL_RESOLVE:
         assert(params->full_surface_hiz_op);
         hzp.DepthBufferResolveEnable = params->depth.enabled;
#if GEN_GEN >= 12
         if (params->stencil.enabled) {
            assert(params->stencil.aux_usage == ISL_AUX_USAGE_CCS_E);
            hzp.StencilBufferResolveEnable = true;
         }
#endif
         break;
      case ISL_AUX_OP_AMBIGUATE:
         assert(params->full_surface_hiz_op);
         hzp.HierarchicalDepthBufferResolveEnable = true;
         break;
      case ISL_AUX_OP_PARTIAL_RESOLVE:
      case ISL_AUX_OP_NONE:
         unreachable("Invalid HIZ op");
      }

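      /* NumberofMultisamples takes the log2 of the sample count;
       * ffs(x) - 1 is log2(x) for the power-of-two sample counts used here.
       */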
      hzp.NumberofMultisamples = ffs(params->num_samples) - 1;
      hzp.SampleMask = 0xFFFF;

      /* Due to a hardware issue, this bit MBZ */
      assert(hzp.ScissorRectangleEnable == false);

      /* Contrary to the HW docs both fields are inclusive */
      hzp.ClearRectangleXMin = params->x0;
      hzp.ClearRectangleYMin = params->y0;

      /* Contrary to the HW docs both fields are exclusive */
      hzp.ClearRectangleXMax = params->x1;
      hzp.ClearRectangleYMax = params->y1;
   }

   /* A PIPE_CONTROL with all bits clear except for "Post-Sync Operation",
    * which must be set to "Write Immediate Data", is required here.
    */
   blorp_emit(batch, GENX(PIPE_CONTROL), pc) {
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address = blorp_get_workaround_page(batch);
   }

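   /* Re-emit 3DSTATE_WM_HZ_OP with all fields back at their default
    * (disabled) values so the HZ op does not remain enabled for any
    * rendering that follows.
    */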
   blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp);
}
#endif

static void
blorp_update_clear_color(struct blorp_batch *batch,
                         const struct brw_blorp_surface_info *info,
                         enum isl_aux_op op)
{
   if (info->clear_color_addr.buffer && op == ISL_AUX_OP_FAST_CLEAR) {
#if GEN_GEN == 11
      blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
         pipe.CommandStreamerStallEnable = true;
      }

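      /* On Gen11 the clear color is updated with two inline-data MI_ATOMIC
       * MOVE8B packets, each carrying 8 bytes of the color. The CS stall and
       * ReturnDataControl on the second packet appear to be there to ensure
       * the writes have completed before the cache invalidations below.
       */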
      /* 2 QWORDS */
      const unsigned inlinedata_dw = 2 * 2;
      const unsigned num_dwords = GENX(MI_ATOMIC_length) + inlinedata_dw;

      struct blorp_address clear_addr = info->clear_color_addr;
      uint32_t *dw = blorp_emitn(batch, GENX(MI_ATOMIC), num_dwords,
                                 .DataSize = MI_ATOMIC_QWORD,
                                 .ATOMICOPCODE = MI_ATOMIC_OP_MOVE8B,
                                 .InlineData = true,
                                 .MemoryAddress = clear_addr);
      /* dw starts at dword 1, but we need to fill dwords 3 and 5 */
      dw[2] = info->clear_color.u32[0];
      dw[4] = info->clear_color.u32[1];

      clear_addr.offset += 8;
      dw = blorp_emitn(batch, GENX(MI_ATOMIC), num_dwords,
                       .DataSize = MI_ATOMIC_QWORD,
                       .ATOMICOPCODE = MI_ATOMIC_OP_MOVE8B,
                       .CSSTALL = true,
                       .ReturnDataControl = true,
                       .InlineData = true,
                       .MemoryAddress = clear_addr);
      /* dw starts at dword 1, but we need to fill dwords 3 and 5 */
      dw[2] = info->clear_color.u32[2];
      dw[4] = info->clear_color.u32[3];

      blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable = true;
         pipe.TextureCacheInvalidationEnable = true;
      }
#elif GEN_GEN >= 9

      /* According to GEN:BUG:2201730850, in the Clear Color Programming Note
       * under the Red channel, "Software shall write the converted Depth
       * Clear to this dword." The only depth formats listed under the red
       * channel are IEEE_FP and UNORM24_X8. These two requirements are
       * incompatible with the UNORM16 depth format, so just ignore that case
       * and simply perform the conversion for all depth formats.
       */
      union isl_color_value fixed_color = info->clear_color;
      if (GEN_GEN == 12 && isl_surf_usage_is_depth(info->surf.usage)) {
         isl_color_value_pack(&info->clear_color, info->surf.format,
                              fixed_color.u32);
      }

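      /* Write the four 32-bit channels of the clear color straight to the
       * clear color address with MI_STORE_DATA_IMM.
       */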
      for (int i = 0; i < 4; i++) {
         blorp_emit(batch, GENX(MI_STORE_DATA_IMM), sdi) {
            sdi.Address = info->clear_color_addr;
            sdi.Address.offset += i * 4;
            sdi.ImmediateData = fixed_color.u32[i];
#if GEN_GEN >= 12
            if (i == 3)
               sdi.ForceWriteCompletionCheck = true;
#endif
         }
      }

      /* The RENDER_SURFACE_STATE::ClearColor field states that software
       * should write the converted depth value 16B after the clear address:
       *
       *    3D Sampler will always fetch clear depth from the location
       *    16-bytes above this address, where the clear depth, converted to
       *    native surface format by software, will be stored.
       */
#if GEN_GEN >= 12
      if (isl_surf_usage_is_depth(info->surf.usage)) {
         blorp_emit(batch, GENX(MI_STORE_DATA_IMM), sdi) {
            sdi.Address = info->clear_color_addr;
            sdi.Address.offset += 4 * 4;
            sdi.ImmediateData = fixed_color.u32[0];
            sdi.ForceWriteCompletionCheck = true;
         }
      }
#endif

#elif GEN_GEN >= 7
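      /* The clear color here is a single dword: the shader channel select
       * fields packed together with one bit per channel in bits 31:28, which
       * is why only 0 and 1 clear values are accepted by the asserts below.
       */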
      blorp_emit(batch, GENX(MI_STORE_DATA_IMM), sdi) {
         sdi.Address = info->clear_color_addr;
         sdi.ImmediateData = ISL_CHANNEL_SELECT_RED   << 25 |
                             ISL_CHANNEL_SELECT_GREEN << 22 |
                             ISL_CHANNEL_SELECT_BLUE  << 19 |
                             ISL_CHANNEL_SELECT_ALPHA << 16;
         if (isl_format_has_int_channel(info->view.format)) {
            for (unsigned i = 0; i < 4; i++) {
               assert(info->clear_color.u32[i] == 0 ||
                      info->clear_color.u32[i] == 1);
            }
            sdi.ImmediateData |= (info->clear_color.u32[0] != 0) << 31;
            sdi.ImmediateData |= (info->clear_color.u32[1] != 0) << 30;
            sdi.ImmediateData |= (info->clear_color.u32[2] != 0) << 29;
            sdi.ImmediateData |= (info->clear_color.u32[3] != 0) << 28;
         } else {
            for (unsigned i = 0; i < 4; i++) {
               assert(info->clear_color.f32[i] == 0.0f ||
                      info->clear_color.f32[i] == 1.0f);
            }
            sdi.ImmediateData |= (info->clear_color.f32[0] != 0.0f) << 31;
            sdi.ImmediateData |= (info->clear_color.f32[1] != 0.0f) << 30;
            sdi.ImmediateData |= (info->clear_color.f32[2] != 0.0f) << 29;
            sdi.ImmediateData |= (info->clear_color.f32[3] != 0.0f) << 28;
         }
      }
#endif
   }
}

/**
 * \brief Execute a blit or render pass operation.
 *
 * To execute the operation, this function manually constructs and emits a
 * batch to draw a rectangle primitive. The batch buffer is flushed before
 * constructing and after emitting the batch.
 *
 * This function alters no GL state.
 */
static void
blorp_exec(struct blorp_batch *batch, const struct blorp_params *params)
{
   if (!(batch->flags & BLORP_BATCH_NO_UPDATE_CLEAR_COLOR)) {
      blorp_update_clear_color(batch, &params->dst, params->fast_clear_op);
      blorp_update_clear_color(batch, &params->depth, params->hiz_op);
   }

#if GEN_GEN >= 8
   if (params->hiz_op != ISL_AUX_OP_NONE) {
      blorp_emit_gen8_hiz_op(batch, params);
      return;
   }
#endif

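   /* Everything else takes the full draw-based path: vertex data, pipeline
    * state, surface states, depth/stencil state, and finally the rectangle
    * primitive itself.
    */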
   blorp_emit_vertex_buffers(batch, params);
   blorp_emit_vertex_elements(batch, params);

   blorp_emit_pipeline(batch, params);

   blorp_emit_surface_states(batch, params);

   if (!(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
      blorp_emit_depth_stencil_config(batch, params);

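   /* Draw the rectangle as a RECTLIST: three vertices per rectangle, with
    * one instance per destination layer.
    */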
   blorp_emit(batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = SEQUENTIAL;
      prim.PrimitiveTopologyType = _3DPRIM_RECTLIST;
#if GEN_GEN >= 7
      prim.PredicateEnable = batch->flags & BLORP_BATCH_PREDICATE_ENABLE;
#endif
      prim.VertexCountPerInstance = 3;
      prim.InstanceCount = params->num_layers;
   }
}

#endif /* BLORP_GENX_EXEC_H */