genxml: Consistently use a numeric "MOCS" field
[mesa.git] / src / intel / blorp / blorp_genX_exec.h
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef BLORP_GENX_EXEC_H
25 #define BLORP_GENX_EXEC_H
26
27 #include "blorp_priv.h"
28 #include "dev/gen_device_info.h"
29 #include "common/gen_sample_positions.h"
30 #include "genxml/gen_macros.h"
31
32 /**
33 * This file provides the blorp pipeline setup and execution functionality.
34 * It defines the following function:
35 *
36 * static void
37 * blorp_exec(struct blorp_context *blorp, void *batch_data,
38 * const struct blorp_params *params);
39 *
40 * It is the job of whoever includes this header to wrap this in something
41 * to get an externally visible symbol.
42 *
43 * In order for the blorp_exec function to work, the driver must provide
44 * implementations of the following static helper functions.
45 */
46
47 static void *
48 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
49
50 static uint64_t
51 blorp_emit_reloc(struct blorp_batch *batch,
52 void *location, struct blorp_address address, uint32_t delta);
53
54 static void *
55 blorp_alloc_dynamic_state(struct blorp_batch *batch,
56 uint32_t size,
57 uint32_t alignment,
58 uint32_t *offset);
59 static void *
60 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
61 struct blorp_address *addr);
62 static void
63 blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
64 const struct blorp_address *addrs,
65 unsigned num_vbs);
66
67 #if GEN_GEN >= 8
68 static struct blorp_address
69 blorp_get_workaround_page(struct blorp_batch *batch);
70 #endif
71
72 static void
73 blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
74 unsigned state_size, unsigned state_alignment,
75 uint32_t *bt_offset, uint32_t *surface_offsets,
76 void **surface_maps);
77
78 static void
79 blorp_flush_range(struct blorp_batch *batch, void *start, size_t size);
80
81 static void
82 blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
83 struct blorp_address address, uint32_t delta);
84
85 #if GEN_GEN >= 7 && GEN_GEN < 10
86 static struct blorp_address
87 blorp_get_surface_base_address(struct blorp_batch *batch);
88 #endif
89
90 static void
91 blorp_emit_urb_config(struct blorp_batch *batch,
92 unsigned vs_entry_size, unsigned sf_entry_size);
93
94 static void
95 blorp_emit_pipeline(struct blorp_batch *batch,
96 const struct blorp_params *params);
97
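/* As a rough, hypothetical sketch of what one of the hooks above might look
 * like (the names here are made up, not taken from any real driver), a driver
 * that appends to a growable batch could implement blorp_emit_dwords() as:
 *
 *    static void *
 *    blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
 *    {
 *       struct my_batch *mb = batch->driver_batch;   // driver's own batch
 *       if (mb->next_dw + n > mb->end_dw)            // next_dw/end_dw: uint32_t *
 *          my_batch_grow(mb, n * 4);                 // make room for n dwords
 *       uint32_t *p = mb->next_dw;
 *       mb->next_dw += n;                            // advance past n dwords
 *       return p;                                    // blorp packs the command here
 *    }
 *
 * i965, anv, and iris each provide their own versions of these hooks before
 * including this header.
 */
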
98 /***** BEGIN blorp_exec implementation ******/
99
100 static uint64_t
101 _blorp_combine_address(struct blorp_batch *batch, void *location,
102 struct blorp_address address, uint32_t delta)
103 {
104 if (address.buffer == NULL) {
105 return address.offset + delta;
106 } else {
107 return blorp_emit_reloc(batch, location, address, delta);
108 }
109 }
110
111 #define __gen_address_type struct blorp_address
112 #define __gen_user_data struct blorp_batch
113 #define __gen_combine_address _blorp_combine_address
114
115 #include "genxml/genX_pack.h"
116
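/* Note on the plumbing (a sketch of how the pieces fit together): the pack
 * functions generated into genX_pack.h route address-typed fields through
 * __gen_combine_address(), i.e. the _blorp_combine_address() helper above,
 * so packing a command either inlines a raw offset (NULL buffer) or asks the
 * driver for a relocation via blorp_emit_reloc().
 */
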
117 #define _blorp_cmd_length(cmd) cmd ## _length
118 #define _blorp_cmd_length_bias(cmd) cmd ## _length_bias
119 #define _blorp_cmd_header(cmd) cmd ## _header
120 #define _blorp_cmd_pack(cmd) cmd ## _pack
121
122 #define blorp_emit(batch, cmd, name) \
123 for (struct cmd name = { _blorp_cmd_header(cmd) }, \
124 *_dst = blorp_emit_dwords(batch, _blorp_cmd_length(cmd)); \
125 __builtin_expect(_dst != NULL, 1); \
126 _blorp_cmd_pack(cmd)(batch, (void *)_dst, &name), \
127 _dst = NULL)
128
129 #define blorp_emitn(batch, cmd, n) ({ \
130 uint32_t *_dw = blorp_emit_dwords(batch, n); \
131 if (_dw) { \
132 struct cmd template = { \
133 _blorp_cmd_header(cmd), \
134 .DWordLength = n - _blorp_cmd_length_bias(cmd), \
135 }; \
136 _blorp_cmd_pack(cmd)(batch, _dw, &template); \
137 } \
138 _dw ? _dw + 1 : NULL; /* Array starts at dw[1] */ \
139 })
140
141 #define STRUCT_ZERO(S) ({ struct S t; memset(&t, 0, sizeof(t)); t; })
142
143 #define blorp_emit_dynamic(batch, state, name, align, offset) \
144 for (struct state name = STRUCT_ZERO(state), \
145 *_dst = blorp_alloc_dynamic_state(batch, \
146 _blorp_cmd_length(state) * 4, \
147 align, offset); \
148 __builtin_expect(_dst != NULL, 1); \
149 _blorp_cmd_pack(state)(batch, (void *)_dst, &name), \
150 blorp_flush_range(batch, _dst, _blorp_cmd_length(state) * 4), \
151 _dst = NULL)
152
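/* To illustrate how the three emit helpers above are used later in this file
 * (these snippets mirror real call sites below):
 *
 *    blorp_emit(batch, GENX(3DSTATE_SAMPLE_MASK), mask) {
 *       mask.SampleMask = (1 << params->num_samples) - 1;  // packed when the block ends
 *    }
 *
 *    uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_BUFFERS), num_dwords);
 *    // dw points at the payload after the header, or NULL if allocation failed
 *
 *    uint32_t cc_vp_offset;
 *    blorp_emit_dynamic(batch, GENX(CC_VIEWPORT), vp, 32, &cc_vp_offset) {
 *       vp.MinimumDepth = 0.0;   // packed into dynamic state, then flushed
 *    }
 */
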
153 /* 3DSTATE_URB
154 * 3DSTATE_URB_VS
155 * 3DSTATE_URB_HS
156 * 3DSTATE_URB_DS
157 * 3DSTATE_URB_GS
158 *
159 * Assign the entire URB to the VS. Even though the VS is disabled, URB space
160 * is still needed because the clipper loads the VUEs from the URB. From
161 * the Sandybridge PRM, Volume 2, Part 1, Section 3DSTATE,
162 * Dword 1.15:0 "VS Number of URB Entries":
163 * This field is always used (even if VS Function Enable is DISABLED).
164 *
165 * The warning below appears in the PRM (Section 3DSTATE_URB), but we can
166 * safely ignore it because this batch contains only one draw call.
167 * Because of URB corruption caused by allocating a previous GS unit
168 * URB entry to the VS unit, software is required to send a “GS NULL
169 * Fence” (Send URB fence with VS URB size == 1 and GS URB size == 0)
170 * plus a dummy DRAW call before any case where VS will be taking over
171 * GS URB space.
172 *
173 * If 3DSTATE_URB_VS is emitted, then the others must be as well.
174 * From the Ivybridge PRM, Volume 2 Part 1, section 1.7.1 3DSTATE_URB_VS:
175 *
176 * 3DSTATE_URB_HS, 3DSTATE_URB_DS, and 3DSTATE_URB_GS must also be
177 * programmed in order for the programming of this state to be
178 * valid.
179 */
180 static void
181 emit_urb_config(struct blorp_batch *batch,
182 const struct blorp_params *params)
183 {
184 /* Once the vertex fetcher has written full VUE entries with a complete
185 * header, the space requirement per vertex is as follows (in bytes):
186 *
187 * Header Position Program constants
188 * +--------+------------+-------------------+
189 * | 16 | 16 | n x 16 |
190 * +--------+------------+-------------------+
191 *
192 * where 'n' stands for the number of varying inputs expressed as vec4s.
193 */
194 const unsigned num_varyings =
195 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
196 const unsigned total_needed = 16 + 16 + num_varyings * 16;
197
198 /* The URB size is expressed in units of 64 bytes (512 bits) */
199 const unsigned vs_entry_size = DIV_ROUND_UP(total_needed, 64);
200
201 const unsigned sf_entry_size =
202 params->sf_prog_data ? params->sf_prog_data->urb_entry_size : 0;
203
204 blorp_emit_urb_config(batch, vs_entry_size, sf_entry_size);
205 }
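/* A quick worked example of the sizing above (illustrative): a blit shader
 * with two varying vec4 inputs needs 16 + 16 + 2 * 16 = 64 bytes per vertex,
 * so vs_entry_size = DIV_ROUND_UP(64, 64) = 1 unit of 64 bytes.
 */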
206
207 #if GEN_GEN >= 7
208 static void
209 blorp_emit_memcpy(struct blorp_batch *batch,
210 struct blorp_address dst,
211 struct blorp_address src,
212 uint32_t size);
213 #endif
214
215 static void
216 blorp_emit_vertex_data(struct blorp_batch *batch,
217 const struct blorp_params *params,
218 struct blorp_address *addr,
219 uint32_t *size)
220 {
221 const float vertices[] = {
222 /* v0 */ (float)params->x1, (float)params->y1, params->z,
223 /* v1 */ (float)params->x0, (float)params->y1, params->z,
224 /* v2 */ (float)params->x0, (float)params->y0, params->z,
225 };
226
227 void *data = blorp_alloc_vertex_buffer(batch, sizeof(vertices), addr);
228 memcpy(data, vertices, sizeof(vertices));
229 *size = sizeof(vertices);
230 blorp_flush_range(batch, data, *size);
231 }
232
233 static void
234 blorp_emit_input_varying_data(struct blorp_batch *batch,
235 const struct blorp_params *params,
236 struct blorp_address *addr,
237 uint32_t *size)
238 {
239 const unsigned vec4_size_in_bytes = 4 * sizeof(float);
240 const unsigned max_num_varyings =
241 DIV_ROUND_UP(sizeof(params->wm_inputs), vec4_size_in_bytes);
242 const unsigned num_varyings =
243 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
244
245 *size = 16 + num_varyings * vec4_size_in_bytes;
246
247 const uint32_t *const inputs_src = (const uint32_t *)&params->wm_inputs;
248 void *data = blorp_alloc_vertex_buffer(batch, *size, addr);
249 uint32_t *inputs = data;
250
251 /* Copy in the VS inputs */
252 assert(sizeof(params->vs_inputs) == 16);
253 memcpy(inputs, &params->vs_inputs, sizeof(params->vs_inputs));
254 inputs += 4;
255
256 if (params->wm_prog_data) {
257 /* Walk over the attribute slots, determine whether each attribute is used
258 * by the program, and when it is, copy its values from the input storage
259 * to the vertex data buffer.
260 */
261 for (unsigned i = 0; i < max_num_varyings; i++) {
262 const gl_varying_slot attr = VARYING_SLOT_VAR0 + i;
263
264 const int input_index = params->wm_prog_data->urb_setup[attr];
265 if (input_index < 0)
266 continue;
267
268 memcpy(inputs, inputs_src + i * 4, vec4_size_in_bytes);
269
270 inputs += 4;
271 }
272 }
273
274 blorp_flush_range(batch, data, *size);
275
276 if (params->dst_clear_color_as_input) {
277 #if GEN_GEN >= 7
278 /* In this case, the clear color isn't known statically and instead
279 * comes in through an indirect which we have to copy into the vertex
280 * buffer before we execute the 3DPRIMITIVE. We already copied the
281 * value of params->wm_inputs.clear_color into the vertex buffer in the
282 * loop above. Now we emit code to stomp it from the GPU with the
283 * actual clear color value.
284 */
285 assert(num_varyings == 1);
286
287 /* The clear color is the first thing after the header */
288 struct blorp_address clear_color_input_addr = *addr;
289 clear_color_input_addr.offset += 16;
290
291 const unsigned clear_color_size =
292 GEN_GEN < 10 ? batch->blorp->isl_dev->ss.clear_value_size : 4 * 4;
293 blorp_emit_memcpy(batch, clear_color_input_addr,
294 params->dst.clear_color_addr,
295 clear_color_size);
296 #else
297 unreachable("MCS partial resolve is not a thing on SNB and earlier");
298 #endif
299 }
300 }
301
302 static void
303 blorp_fill_vertex_buffer_state(struct blorp_batch *batch,
304 struct GENX(VERTEX_BUFFER_STATE) *vb,
305 unsigned idx,
306 struct blorp_address addr, uint32_t size,
307 uint32_t stride)
308 {
309 vb[idx].VertexBufferIndex = idx;
310 vb[idx].BufferStartingAddress = addr;
311 vb[idx].BufferPitch = stride;
312
313 #if GEN_GEN >= 6
314 vb[idx].MOCS = addr.mocs;
315 #endif
316
317 #if GEN_GEN >= 7
318 vb[idx].AddressModifyEnable = true;
319 #endif
320
321 #if GEN_GEN >= 8
322 vb[idx].BufferSize = size;
323 #elif GEN_GEN >= 5
324 vb[idx].BufferAccessType = stride > 0 ? VERTEXDATA : INSTANCEDATA;
325 vb[idx].EndAddress = vb[idx].BufferStartingAddress;
326 vb[idx].EndAddress.offset += size - 1;
327 #elif GEN_GEN == 4
328 vb[idx].BufferAccessType = stride > 0 ? VERTEXDATA : INSTANCEDATA;
329 vb[idx].MaxIndex = stride > 0 ? size / stride : 0;
330 #endif
331 }
332
333 static void
334 blorp_emit_vertex_buffers(struct blorp_batch *batch,
335 const struct blorp_params *params)
336 {
337 struct GENX(VERTEX_BUFFER_STATE) vb[3];
338 uint32_t num_vbs = 2;
339 memset(vb, 0, sizeof(vb));
340
341 struct blorp_address addrs[2] = {};
342 uint32_t size;
343 blorp_emit_vertex_data(batch, params, &addrs[0], &size);
344 blorp_fill_vertex_buffer_state(batch, vb, 0, addrs[0], size,
345 3 * sizeof(float));
346
347 blorp_emit_input_varying_data(batch, params, &addrs[1], &size);
348 blorp_fill_vertex_buffer_state(batch, vb, 1, addrs[1], size, 0);
349
350 const unsigned num_dwords = 1 + num_vbs * GENX(VERTEX_BUFFER_STATE_length);
351 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_BUFFERS), num_dwords);
352 if (!dw)
353 return;
354
355 blorp_vf_invalidate_for_vb_48b_transitions(batch, addrs, num_vbs);
356
357 for (unsigned i = 0; i < num_vbs; i++) {
358 GENX(VERTEX_BUFFER_STATE_pack)(batch, dw, &vb[i]);
359 dw += GENX(VERTEX_BUFFER_STATE_length);
360 }
361 }
362
363 static void
364 blorp_emit_vertex_elements(struct blorp_batch *batch,
365 const struct blorp_params *params)
366 {
367 const unsigned num_varyings =
368 params->wm_prog_data ? params->wm_prog_data->num_varying_inputs : 0;
369 bool need_ndc = batch->blorp->compiler->devinfo->gen <= 5;
370 const unsigned num_elements = 2 + need_ndc + num_varyings;
371
372 struct GENX(VERTEX_ELEMENT_STATE) ve[num_elements];
373 memset(ve, 0, num_elements * sizeof(*ve));
374
375 /* Set up the VBO for the rectangle primitive.
376 *
377 * A rectangle primitive (3DPRIM_RECTLIST) consists of only three
378 * vertices. The vertices reside in screen space with DirectX
379 * coordinates (that is, (0, 0) is the upper left corner).
380 *
381 * v2 ------ implied
382 * | |
383 * | |
384 * v1 ----- v0
385 *
386 * Since the VS is disabled, the clipper loads each VUE directly from
387 * the URB. This is controlled by the 3DSTATE_VERTEX_BUFFERS and
388 * 3DSTATE_VERTEX_ELEMENTS packets below. The VUE contents are as follows:
389 * dw0: Reserved, MBZ.
390 * dw1: Render Target Array Index. Below, the vertex fetcher is programmed
391 * to fill this with the primitive instance identifier, which is
392 * used for layered clears. All other renders have only one instance,
393 * so the value is effectively zero.
394 * dw2: Viewport Index. The HiZ op disables viewport mapping and
395 * scissoring, so set the dword to 0.
396 * dw3: Point Width: The HiZ op does not emit the POINTLIST primitive,
397 * so set the dword to 0.
398 * dw4: Vertex Position X.
399 * dw5: Vertex Position Y.
400 * dw6: Vertex Position Z.
401 * dw7: Vertex Position W.
402 *
403 * dw8: Flat vertex input 0
404 * dw9: Flat vertex input 1
405 * ...
406 * dwn: Flat vertex input n - 8
407 *
408 * For details, see the Sandybridge PRM, Volume 2, Part 1, Section 1.5.1
409 * "Vertex URB Entry (VUE) Formats".
410 *
411 * Only vertex positions X and Y vary; Z is fixed to zero and W to one.
412 * Header words dw0, dw2, and dw3 are zero. There is no need to include
413 * the fixed values in the vertex buffer, because the vertex fetcher can
414 * be instructed to fill those vertex elements with constant zeros and
415 * ones instead of reading them from the buffer.
416 * Flat inputs are program constants that are not interpolated; their
417 * values are the same for every vertex.
418 *
419 * See the vertex element setup below.
420 */
421 unsigned slot = 0;
422
423 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
424 .VertexBufferIndex = 1,
425 .Valid = true,
426 .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
427 .SourceElementOffset = 0,
428 .Component0Control = VFCOMP_STORE_SRC,
429
430 /* From Gen8 onwards the hardware is no longer instructed to overwrite
431 * components using an element specifier. Instead there is a separate
432 * 3DSTATE_VF_SGVS (System Generated Value Setup) state packet for it.
433 */
434 #if GEN_GEN >= 8
435 .Component1Control = VFCOMP_STORE_0,
436 #elif GEN_GEN >= 5
437 .Component1Control = VFCOMP_STORE_IID,
438 #else
439 .Component1Control = VFCOMP_STORE_0,
440 #endif
441 .Component2Control = VFCOMP_STORE_0,
442 .Component3Control = VFCOMP_STORE_0,
443 #if GEN_GEN <= 5
444 .DestinationElementOffset = slot * 4,
445 #endif
446 };
447 slot++;
448
449 #if GEN_GEN <= 5
450 /* On Iron Lake and earlier, a native device coordinates version of the
451 * position goes right after the normal VUE header and before position.
452 * Since w == 1 for all of our coordinates, this is just a copy of the
453 * position.
454 */
455 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
456 .VertexBufferIndex = 0,
457 .Valid = true,
458 .SourceElementFormat = ISL_FORMAT_R32G32B32_FLOAT,
459 .SourceElementOffset = 0,
460 .Component0Control = VFCOMP_STORE_SRC,
461 .Component1Control = VFCOMP_STORE_SRC,
462 .Component2Control = VFCOMP_STORE_SRC,
463 .Component3Control = VFCOMP_STORE_1_FP,
464 .DestinationElementOffset = slot * 4,
465 };
466 slot++;
467 #endif
468
469 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
470 .VertexBufferIndex = 0,
471 .Valid = true,
472 .SourceElementFormat = ISL_FORMAT_R32G32B32_FLOAT,
473 .SourceElementOffset = 0,
474 .Component0Control = VFCOMP_STORE_SRC,
475 .Component1Control = VFCOMP_STORE_SRC,
476 .Component2Control = VFCOMP_STORE_SRC,
477 .Component3Control = VFCOMP_STORE_1_FP,
478 #if GEN_GEN <= 5
479 .DestinationElementOffset = slot * 4,
480 #endif
481 };
482 slot++;
483
484 for (unsigned i = 0; i < num_varyings; ++i) {
485 ve[slot] = (struct GENX(VERTEX_ELEMENT_STATE)) {
486 .VertexBufferIndex = 1,
487 .Valid = true,
488 .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
489 .SourceElementOffset = 16 + i * 4 * sizeof(float),
490 .Component0Control = VFCOMP_STORE_SRC,
491 .Component1Control = VFCOMP_STORE_SRC,
492 .Component2Control = VFCOMP_STORE_SRC,
493 .Component3Control = VFCOMP_STORE_SRC,
494 #if GEN_GEN <= 5
495 .DestinationElementOffset = slot * 4,
496 #endif
497 };
498 slot++;
499 }
500
501 const unsigned num_dwords =
502 1 + GENX(VERTEX_ELEMENT_STATE_length) * num_elements;
503 uint32_t *dw = blorp_emitn(batch, GENX(3DSTATE_VERTEX_ELEMENTS), num_dwords);
504 if (!dw)
505 return;
506
507 for (unsigned i = 0; i < num_elements; i++) {
508 GENX(VERTEX_ELEMENT_STATE_pack)(batch, dw, &ve[i]);
509 dw += GENX(VERTEX_ELEMENT_STATE_length);
510 }
511
512 #if GEN_GEN >= 8
513 /* Overwrite the Render Target Array Index (2nd dword) in the VUE header
514 * with the primitive instance identifier. This is used for layered clears.
515 */
516 blorp_emit(batch, GENX(3DSTATE_VF_SGVS), sgvs) {
517 sgvs.InstanceIDEnable = true;
518 sgvs.InstanceIDComponentNumber = COMP_1;
519 sgvs.InstanceIDElementOffset = 0;
520 }
521
522 for (unsigned i = 0; i < num_elements; i++) {
523 blorp_emit(batch, GENX(3DSTATE_VF_INSTANCING), vf) {
524 vf.VertexElementIndex = i;
525 vf.InstancingEnable = false;
526 }
527 }
528
529 blorp_emit(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
530 topo.PrimitiveTopologyType = _3DPRIM_RECTLIST;
531 }
532 #endif
533 }
534
535 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
536 static uint32_t
537 blorp_emit_cc_viewport(struct blorp_batch *batch)
538 {
539 uint32_t cc_vp_offset;
540 blorp_emit_dynamic(batch, GENX(CC_VIEWPORT), vp, 32, &cc_vp_offset) {
541 vp.MinimumDepth = 0.0;
542 vp.MaximumDepth = 1.0;
543 }
544
545 #if GEN_GEN >= 7
546 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), vsp) {
547 vsp.CCViewportPointer = cc_vp_offset;
548 }
549 #elif GEN_GEN == 6
550 blorp_emit(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vsp) {
551 vsp.CCViewportStateChange = true;
552 vsp.PointertoCC_VIEWPORT = cc_vp_offset;
553 }
554 #endif
555
556 return cc_vp_offset;
557 }
558
559 static uint32_t
560 blorp_emit_sampler_state(struct blorp_batch *batch)
561 {
562 uint32_t offset;
563 blorp_emit_dynamic(batch, GENX(SAMPLER_STATE), sampler, 32, &offset) {
564 sampler.MipModeFilter = MIPFILTER_NONE;
565 sampler.MagModeFilter = MAPFILTER_LINEAR;
566 sampler.MinModeFilter = MAPFILTER_LINEAR;
567 sampler.MinLOD = 0;
568 sampler.MaxLOD = 0;
569 sampler.TCXAddressControlMode = TCM_CLAMP;
570 sampler.TCYAddressControlMode = TCM_CLAMP;
571 sampler.TCZAddressControlMode = TCM_CLAMP;
572 sampler.MaximumAnisotropy = RATIO21;
573 sampler.RAddressMinFilterRoundingEnable = true;
574 sampler.RAddressMagFilterRoundingEnable = true;
575 sampler.VAddressMinFilterRoundingEnable = true;
576 sampler.VAddressMagFilterRoundingEnable = true;
577 sampler.UAddressMinFilterRoundingEnable = true;
578 sampler.UAddressMagFilterRoundingEnable = true;
579 #if GEN_GEN > 6
580 sampler.NonnormalizedCoordinateEnable = true;
581 #endif
582 }
583
584 #if GEN_GEN >= 7
585 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_PS), ssp) {
586 ssp.PointertoPSSamplerState = offset;
587 }
588 #elif GEN_GEN == 6
589 blorp_emit(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS), ssp) {
590 ssp.VSSamplerStateChange = true;
591 ssp.GSSamplerStateChange = true;
592 ssp.PSSamplerStateChange = true;
593 ssp.PointertoPSSamplerState = offset;
594 }
595 #endif
596
597 return offset;
598 }
599
600 /* What follows is the code for setting up a "pipeline" on Sandy Bridge and
601 * later hardware. This file will be included by i965 for gen4-5 as well, so
602 * this code is guarded by GEN_GEN >= 6.
603 */
604 #if GEN_GEN >= 6
605
606 static void
607 blorp_emit_vs_config(struct blorp_batch *batch,
608 const struct blorp_params *params)
609 {
610 struct brw_vs_prog_data *vs_prog_data = params->vs_prog_data;
611 assert(!vs_prog_data || GEN_GEN < 11 ||
612 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8);
613
614 blorp_emit(batch, GENX(3DSTATE_VS), vs) {
615 if (vs_prog_data) {
616 vs.Enable = true;
617
618 vs.KernelStartPointer = params->vs_prog_kernel;
619
620 vs.DispatchGRFStartRegisterForURBData =
621 vs_prog_data->base.base.dispatch_grf_start_reg;
622 vs.VertexURBEntryReadLength =
623 vs_prog_data->base.urb_read_length;
624 vs.VertexURBEntryReadOffset = 0;
625
626 vs.MaximumNumberofThreads =
627 batch->blorp->isl_dev->info->max_vs_threads - 1;
628
629 #if GEN_GEN >= 8
630 vs.SIMD8DispatchEnable =
631 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
632 #endif
633 }
634 }
635 }
636
637 static void
638 blorp_emit_sf_config(struct blorp_batch *batch,
639 const struct blorp_params *params)
640 {
641 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
642
643 /* 3DSTATE_SF
644 *
645 * Disable ViewportTransformEnable (dw2.1)
646 *
647 * From the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
648 * Primitives Overview":
649 * RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
650 * use of screen-space coordinates).
651 *
652 * A solid rectangle must be rendered, so set FrontFaceFillMode (dw2.4:3)
653 * and BackFaceFillMode (dw2.5:6) to SOLID(0).
654 *
655 * From the Sandy Bridge PRM, Volume 2, Part 1, Section
656 * 6.4.1.1 3DSTATE_SF, Field FrontFaceFillMode:
657 * SOLID: Any triangle or rectangle object found to be front-facing
658 * is rendered as a solid object. This setting is required when
659 * rendering rectangle (RECTLIST) objects.
660 */
661
662 #if GEN_GEN >= 8
663
664 blorp_emit(batch, GENX(3DSTATE_SF), sf);
665
666 blorp_emit(batch, GENX(3DSTATE_RASTER), raster) {
667 raster.CullMode = CULLMODE_NONE;
668 }
669
670 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
671 sbe.VertexURBEntryReadOffset = 1;
672 if (prog_data) {
673 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
674 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
675 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
676 } else {
677 sbe.NumberofSFOutputAttributes = 0;
678 sbe.VertexURBEntryReadLength = 1;
679 }
680 sbe.ForceVertexURBEntryReadLength = true;
681 sbe.ForceVertexURBEntryReadOffset = true;
682
683 #if GEN_GEN >= 9
684 for (unsigned i = 0; i < 32; i++)
685 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
686 #endif
687 }
688
689 #elif GEN_GEN >= 7
690
691 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
692 sf.FrontFaceFillMode = FILL_MODE_SOLID;
693 sf.BackFaceFillMode = FILL_MODE_SOLID;
694
695 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
696 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
697
698 #if GEN_GEN == 7
699 sf.DepthBufferSurfaceFormat = params->depth_format;
700 #endif
701 }
702
703 blorp_emit(batch, GENX(3DSTATE_SBE), sbe) {
704 sbe.VertexURBEntryReadOffset = 1;
705 if (prog_data) {
706 sbe.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
707 sbe.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
708 sbe.ConstantInterpolationEnable = prog_data->flat_inputs;
709 } else {
710 sbe.NumberofSFOutputAttributes = 0;
711 sbe.VertexURBEntryReadLength = 1;
712 }
713 }
714
715 #else /* GEN_GEN <= 6 */
716
717 blorp_emit(batch, GENX(3DSTATE_SF), sf) {
718 sf.FrontFaceFillMode = FILL_MODE_SOLID;
719 sf.BackFaceFillMode = FILL_MODE_SOLID;
720
721 sf.MultisampleRasterizationMode = params->num_samples > 1 ?
722 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
723
724 sf.VertexURBEntryReadOffset = 1;
725 if (prog_data) {
726 sf.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
727 sf.VertexURBEntryReadLength = brw_blorp_get_urb_length(prog_data);
728 sf.ConstantInterpolationEnable = prog_data->flat_inputs;
729 } else {
730 sf.NumberofSFOutputAttributes = 0;
731 sf.VertexURBEntryReadLength = 1;
732 }
733 }
734
735 #endif /* GEN_GEN */
736 }
737
738 static void
739 blorp_emit_ps_config(struct blorp_batch *batch,
740 const struct blorp_params *params)
741 {
742 const struct brw_wm_prog_data *prog_data = params->wm_prog_data;
743
744 /* Even when thread dispatch is disabled, max threads (dw5.25:31) must be
745 * nonzero to prevent the GPU from hanging. While the documentation doesn't
746 * mention this explicitly, it notes that the valid range for the field is
747 * [1,39] = [2,40] threads, which excludes zero.
748 *
749 * To be safe (and to minimize extraneous code) we go ahead and fully
750 * configure the WM state whether or not there is a WM program.
751 */
752
753 #if GEN_GEN >= 8
754
755 blorp_emit(batch, GENX(3DSTATE_WM), wm);
756
757 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
758 if (params->src.enabled) {
759 ps.SamplerCount = 1; /* Up to 4 samplers */
760 ps.BindingTableEntryCount = 2;
761 } else {
762 ps.BindingTableEntryCount = 1;
763 }
764
765 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests disabling
766 * prefetching of binding tables on A0 and B0 steppings.
767 * TODO: Revisit this WA on C0 stepping.
768 */
769 if (GEN_GEN == 11)
770 ps.BindingTableEntryCount = 0;
771
772 if (prog_data) {
773 ps._8PixelDispatchEnable = prog_data->dispatch_8;
774 ps._16PixelDispatchEnable = prog_data->dispatch_16;
775 ps._32PixelDispatchEnable = prog_data->dispatch_32;
776
777 /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
778 *
779 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
780 * Dispatch must not be enabled for PER_PIXEL dispatch mode."
781 *
782 * Since 16x MSAA is first introduced on SKL, we don't need to apply
783 * the workaround on any older hardware.
784 */
785 if (GEN_GEN >= 9 && !prog_data->persample_dispatch &&
786 params->num_samples == 16) {
787 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
788 ps._32PixelDispatchEnable = false;
789 }
790
791 ps.DispatchGRFStartRegisterForConstantSetupData0 =
792 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
793 ps.DispatchGRFStartRegisterForConstantSetupData1 =
794 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
795 ps.DispatchGRFStartRegisterForConstantSetupData2 =
796 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);
797
798 ps.KernelStartPointer0 = params->wm_prog_kernel +
799 brw_wm_prog_data_prog_offset(prog_data, ps, 0);
800 ps.KernelStartPointer1 = params->wm_prog_kernel +
801 brw_wm_prog_data_prog_offset(prog_data, ps, 1);
802 ps.KernelStartPointer2 = params->wm_prog_kernel +
803 brw_wm_prog_data_prog_offset(prog_data, ps, 2);
804 }
805
806 /* 3DSTATE_PS expects the number of threads per PSD, which is always 64
807 * for pre-Gen11 and 128 for Gen11+. On Gen11+, a programmed value of k
808 * implies 2(k+1) threads. It implicitly scales for different GT levels
809 * (which have some number of PSDs).
810 *
811 * In Gen8 the format is U8-2 whereas in Gen9+ it is U9-1.
812 */
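/* Working the encoding through with the values programmed just below
 * (illustrative): Gen8 (U8-2) gets 64 - 2 = 62, i.e. 62 + 2 = 64 threads per
 * PSD; Gen9/10 (U9-1) get 64 - 1 = 63, i.e. 63 + 1 = 64 threads; and Gen11
 * interprets the same 63 as 2 * (63 + 1) = 128 threads per PSD.
 */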
813 if (GEN_GEN >= 9)
814 ps.MaximumNumberofThreadsPerPSD = 64 - 1;
815 else
816 ps.MaximumNumberofThreadsPerPSD = 64 - 2;
817
818 switch (params->fast_clear_op) {
819 case ISL_AUX_OP_NONE:
820 break;
821 #if GEN_GEN >= 9
822 case ISL_AUX_OP_PARTIAL_RESOLVE:
823 ps.RenderTargetResolveType = RESOLVE_PARTIAL;
824 break;
825 case ISL_AUX_OP_FULL_RESOLVE:
826 ps.RenderTargetResolveType = RESOLVE_FULL;
827 break;
828 #else
829 case ISL_AUX_OP_FULL_RESOLVE:
830 ps.RenderTargetResolveEnable = true;
831 break;
832 #endif
833 case ISL_AUX_OP_FAST_CLEAR:
834 ps.RenderTargetFastClearEnable = true;
835 break;
836 default:
837 unreachable("Invalid fast clear op");
838 }
839 }
840
841 blorp_emit(batch, GENX(3DSTATE_PS_EXTRA), psx) {
842 if (prog_data) {
843 psx.PixelShaderValid = true;
844 psx.AttributeEnable = prog_data->num_varying_inputs > 0;
845 psx.PixelShaderIsPerSample = prog_data->persample_dispatch;
846 }
847
848 if (params->src.enabled)
849 psx.PixelShaderKillsPixel = true;
850 }
851
852 #elif GEN_GEN >= 7
853
854 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
855 switch (params->hiz_op) {
856 case ISL_AUX_OP_FAST_CLEAR:
857 wm.DepthBufferClear = true;
858 break;
859 case ISL_AUX_OP_FULL_RESOLVE:
860 wm.DepthBufferResolveEnable = true;
861 break;
862 case ISL_AUX_OP_AMBIGUATE:
863 wm.HierarchicalDepthBufferResolveEnable = true;
864 break;
865 case ISL_AUX_OP_NONE:
866 break;
867 default:
868 unreachable("not reached");
869 }
870
871 if (prog_data)
872 wm.ThreadDispatchEnable = true;
873
874 if (params->src.enabled)
875 wm.PixelShaderKillsPixel = true;
876
877 if (params->num_samples > 1) {
878 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
879 wm.MultisampleDispatchMode =
880 (prog_data && prog_data->persample_dispatch) ?
881 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
882 } else {
883 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
884 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
885 }
886 }
887
888 blorp_emit(batch, GENX(3DSTATE_PS), ps) {
889 ps.MaximumNumberofThreads =
890 batch->blorp->isl_dev->info->max_wm_threads - 1;
891
892 #if GEN_IS_HASWELL
893 ps.SampleMask = 1;
894 #endif
895
896 if (prog_data) {
897 ps._8PixelDispatchEnable = prog_data->dispatch_8;
898 ps._16PixelDispatchEnable = prog_data->dispatch_16;
899 ps._32PixelDispatchEnable = prog_data->dispatch_32;
900
901 ps.DispatchGRFStartRegisterForConstantSetupData0 =
902 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
903 ps.DispatchGRFStartRegisterForConstantSetupData1 =
904 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
905 ps.DispatchGRFStartRegisterForConstantSetupData2 =
906 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);
907
908 ps.KernelStartPointer0 = params->wm_prog_kernel +
909 brw_wm_prog_data_prog_offset(prog_data, ps, 0);
910 ps.KernelStartPointer1 = params->wm_prog_kernel +
911 brw_wm_prog_data_prog_offset(prog_data, ps, 1);
912 ps.KernelStartPointer2 = params->wm_prog_kernel +
913 brw_wm_prog_data_prog_offset(prog_data, ps, 2);
914
915 ps.AttributeEnable = prog_data->num_varying_inputs > 0;
916 } else {
917 /* Gen7 hardware gets angry if we don't enable at least one dispatch
918 * mode, so just enable 16-pixel dispatch if we don't have a program.
919 */
920 ps._16PixelDispatchEnable = true;
921 }
922
923 if (params->src.enabled)
924 ps.SamplerCount = 1; /* Up to 4 samplers */
925
926 switch (params->fast_clear_op) {
927 case ISL_AUX_OP_NONE:
928 break;
929 case ISL_AUX_OP_FULL_RESOLVE:
930 ps.RenderTargetResolveEnable = true;
931 break;
932 case ISL_AUX_OP_FAST_CLEAR:
933 ps.RenderTargetFastClearEnable = true;
934 break;
935 default:
936 unreachable("Invalid fast clear op");
937 }
938 }
939
940 #else /* GEN_GEN <= 6 */
941
942 blorp_emit(batch, GENX(3DSTATE_WM), wm) {
943 wm.MaximumNumberofThreads =
944 batch->blorp->isl_dev->info->max_wm_threads - 1;
945
946 switch (params->hiz_op) {
947 case ISL_AUX_OP_FAST_CLEAR:
948 wm.DepthBufferClear = true;
949 break;
950 case ISL_AUX_OP_FULL_RESOLVE:
951 wm.DepthBufferResolveEnable = true;
952 break;
953 case ISL_AUX_OP_AMBIGUATE:
954 wm.HierarchicalDepthBufferResolveEnable = true;
955 break;
956 case ISL_AUX_OP_NONE:
957 break;
958 default:
959 unreachable("not reached");
960 }
961
962 if (prog_data) {
963 wm.ThreadDispatchEnable = true;
964
965 wm._8PixelDispatchEnable = prog_data->dispatch_8;
966 wm._16PixelDispatchEnable = prog_data->dispatch_16;
967 wm._32PixelDispatchEnable = prog_data->dispatch_32;
968
969 wm.DispatchGRFStartRegisterForConstantSetupData0 =
970 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 0);
971 wm.DispatchGRFStartRegisterForConstantSetupData1 =
972 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 1);
973 wm.DispatchGRFStartRegisterForConstantSetupData2 =
974 brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm, 2);
975
976 wm.KernelStartPointer0 = params->wm_prog_kernel +
977 brw_wm_prog_data_prog_offset(prog_data, wm, 0);
978 wm.KernelStartPointer1 = params->wm_prog_kernel +
979 brw_wm_prog_data_prog_offset(prog_data, wm, 1);
980 wm.KernelStartPointer2 = params->wm_prog_kernel +
981 brw_wm_prog_data_prog_offset(prog_data, wm, 2);
982
983 wm.NumberofSFOutputAttributes = prog_data->num_varying_inputs;
984 }
985
986 if (params->src.enabled) {
987 wm.SamplerCount = 1; /* Up to 4 samplers */
988 wm.PixelShaderKillsPixel = true; /* TODO: temporarily smash on */
989 }
990
991 if (params->num_samples > 1) {
992 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
993 wm.MultisampleDispatchMode =
994 (prog_data && prog_data->persample_dispatch) ?
995 MSDISPMODE_PERSAMPLE : MSDISPMODE_PERPIXEL;
996 } else {
997 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
998 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
999 }
1000 }
1001
1002 #endif /* GEN_GEN */
1003 }
1004
1005 static uint32_t
1006 blorp_emit_blend_state(struct blorp_batch *batch,
1007 const struct blorp_params *params)
1008 {
1009 struct GENX(BLEND_STATE) blend;
1010 memset(&blend, 0, sizeof(blend));
1011
1012 uint32_t offset;
1013 int size = GENX(BLEND_STATE_length) * 4;
1014 size += GENX(BLEND_STATE_ENTRY_length) * 4 * params->num_draw_buffers;
1015 uint32_t *state = blorp_alloc_dynamic_state(batch, size, 64, &offset);
1016 uint32_t *pos = state;
1017
1018 GENX(BLEND_STATE_pack)(NULL, pos, &blend);
1019 pos += GENX(BLEND_STATE_length);
1020
1021 for (unsigned i = 0; i < params->num_draw_buffers; ++i) {
1022 struct GENX(BLEND_STATE_ENTRY) entry = {
1023 .PreBlendColorClampEnable = true,
1024 .PostBlendColorClampEnable = true,
1025 .ColorClampRange = COLORCLAMP_RTFORMAT,
1026
1027 .WriteDisableRed = params->color_write_disable[0],
1028 .WriteDisableGreen = params->color_write_disable[1],
1029 .WriteDisableBlue = params->color_write_disable[2],
1030 .WriteDisableAlpha = params->color_write_disable[3],
1031 };
1032 GENX(BLEND_STATE_ENTRY_pack)(NULL, pos, &entry);
1033 pos += GENX(BLEND_STATE_ENTRY_length);
1034 }
1035
1036 blorp_flush_range(batch, state, size);
1037
1038 #if GEN_GEN >= 7
1039 blorp_emit(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), sp) {
1040 sp.BlendStatePointer = offset;
1041 #if GEN_GEN >= 8
1042 sp.BlendStatePointerValid = true;
1043 #endif
1044 }
1045 #endif
1046
1047 #if GEN_GEN >= 8
1048 blorp_emit(batch, GENX(3DSTATE_PS_BLEND), ps_blend) {
1049 ps_blend.HasWriteableRT = true;
1050 }
1051 #endif
1052
1053 return offset;
1054 }
1055
1056 static uint32_t
1057 blorp_emit_color_calc_state(struct blorp_batch *batch,
1058 MAYBE_UNUSED const struct blorp_params *params)
1059 {
1060 uint32_t offset;
1061 blorp_emit_dynamic(batch, GENX(COLOR_CALC_STATE), cc, 64, &offset) {
1062 #if GEN_GEN <= 8
1063 cc.StencilReferenceValue = params->stencil_ref;
1064 #endif
1065 }
1066
1067 #if GEN_GEN >= 7
1068 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), sp) {
1069 sp.ColorCalcStatePointer = offset;
1070 #if GEN_GEN >= 8
1071 sp.ColorCalcStatePointerValid = true;
1072 #endif
1073 }
1074 #endif
1075
1076 return offset;
1077 }
1078
1079 static uint32_t
1080 blorp_emit_depth_stencil_state(struct blorp_batch *batch,
1081 const struct blorp_params *params)
1082 {
1083 #if GEN_GEN >= 8
1084 struct GENX(3DSTATE_WM_DEPTH_STENCIL) ds = {
1085 GENX(3DSTATE_WM_DEPTH_STENCIL_header),
1086 };
1087 #else
1088 struct GENX(DEPTH_STENCIL_STATE) ds = { 0 };
1089 #endif
1090
1091 if (params->depth.enabled) {
1092 ds.DepthBufferWriteEnable = true;
1093
1094 switch (params->hiz_op) {
1095 case ISL_AUX_OP_NONE:
1096 ds.DepthTestEnable = true;
1097 ds.DepthTestFunction = COMPAREFUNCTION_ALWAYS;
1098 break;
1099
1100 /* See the following sections of the Sandy Bridge PRM, Volume 2, Part 1:
1101 * - 7.5.3.1 Depth Buffer Clear
1102 * - 7.5.3.2 Depth Buffer Resolve
1103 * - 7.5.3.3 Hierarchical Depth Buffer Resolve
1104 */
1105 case ISL_AUX_OP_FULL_RESOLVE:
1106 ds.DepthTestEnable = true;
1107 ds.DepthTestFunction = COMPAREFUNCTION_NEVER;
1108 break;
1109
1110 case ISL_AUX_OP_FAST_CLEAR:
1111 case ISL_AUX_OP_AMBIGUATE:
1112 ds.DepthTestEnable = false;
1113 break;
1114 case ISL_AUX_OP_PARTIAL_RESOLVE:
1115 unreachable("Invalid HIZ op");
1116 }
1117 }
1118
1119 if (params->stencil.enabled) {
1120 ds.StencilBufferWriteEnable = true;
1121 ds.StencilTestEnable = true;
1122 ds.DoubleSidedStencilEnable = false;
1123
1124 ds.StencilTestFunction = COMPAREFUNCTION_ALWAYS;
1125 ds.StencilPassDepthPassOp = STENCILOP_REPLACE;
1126
1127 ds.StencilWriteMask = params->stencil_mask;
1128 #if GEN_GEN >= 9
1129 ds.StencilReferenceValue = params->stencil_ref;
1130 #endif
1131 }
1132
1133 #if GEN_GEN >= 8
1134 uint32_t offset = 0;
1135 uint32_t *dw = blorp_emit_dwords(batch,
1136 GENX(3DSTATE_WM_DEPTH_STENCIL_length));
1137 if (!dw)
1138 return 0;
1139
1140 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &ds);
1141 #else
1142 uint32_t offset;
1143 void *state = blorp_alloc_dynamic_state(batch,
1144 GENX(DEPTH_STENCIL_STATE_length) * 4,
1145 64, &offset);
1146 GENX(DEPTH_STENCIL_STATE_pack)(NULL, state, &ds);
1147 blorp_flush_range(batch, state, GENX(DEPTH_STENCIL_STATE_length) * 4);
1148 #endif
1149
1150 #if GEN_GEN == 7
1151 blorp_emit(batch, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), sp) {
1152 sp.PointertoDEPTH_STENCIL_STATE = offset;
1153 }
1154 #endif
1155
1156 return offset;
1157 }
1158
1159 static void
1160 blorp_emit_3dstate_multisample(struct blorp_batch *batch,
1161 const struct blorp_params *params)
1162 {
1163 blorp_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
1164 ms.NumberofMultisamples = __builtin_ffs(params->num_samples) - 1;
1165
1166 #if GEN_GEN >= 8
1167 /* The PRM says that this bit is valid only for DX9:
1168 *
1169 * SW can choose to set this bit only for DX9 API. DX10/OGL API's
1170 * should not have any effect by setting or not setting this bit.
1171 */
1172 ms.PixelPositionOffsetEnable = false;
1173 #elif GEN_GEN >= 7
1174
1175 switch (params->num_samples) {
1176 case 1:
1177 GEN_SAMPLE_POS_1X(ms.Sample);
1178 break;
1179 case 2:
1180 GEN_SAMPLE_POS_2X(ms.Sample);
1181 break;
1182 case 4:
1183 GEN_SAMPLE_POS_4X(ms.Sample);
1184 break;
1185 case 8:
1186 GEN_SAMPLE_POS_8X(ms.Sample);
1187 break;
1188 default:
1189 break;
1190 }
1191 #else
1192 GEN_SAMPLE_POS_4X(ms.Sample);
1193 #endif
1194 ms.PixelLocation = CENTER;
1195 }
1196 }
1197
1198 static void
1199 blorp_emit_pipeline(struct blorp_batch *batch,
1200 const struct blorp_params *params)
1201 {
1202 uint32_t blend_state_offset = 0;
1203 uint32_t color_calc_state_offset;
1204 uint32_t depth_stencil_state_offset;
1205
1206 emit_urb_config(batch, params);
1207
1208 if (params->wm_prog_data) {
1209 blend_state_offset = blorp_emit_blend_state(batch, params);
1210 }
1211 color_calc_state_offset = blorp_emit_color_calc_state(batch, params);
1212 depth_stencil_state_offset = blorp_emit_depth_stencil_state(batch, params);
1213
1214 #if GEN_GEN == 6
1215 /* 3DSTATE_CC_STATE_POINTERS
1216 *
1217 * The pointer offsets are relative to
1218 * CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
1219 *
1220 * The HiZ op doesn't use BLEND_STATE or COLOR_CALC_STATE.
1221 *
1222 * The dynamic state emit helpers emit their own STATE_POINTERS packets on
1223 * gen7+. However, on gen6 and earlier, they're all lumped together in
1224 * one CC_STATE_POINTERS packet so we have to emit that here.
1225 */
1226 blorp_emit(batch, GENX(3DSTATE_CC_STATE_POINTERS), cc) {
1227 cc.BLEND_STATEChange = true;
1228 cc.ColorCalcStatePointerValid = true;
1229 cc.DEPTH_STENCIL_STATEChange = true;
1230 cc.PointertoBLEND_STATE = blend_state_offset;
1231 cc.ColorCalcStatePointer = color_calc_state_offset;
1232 cc.PointertoDEPTH_STENCIL_STATE = depth_stencil_state_offset;
1233 }
1234 #else
1235 (void)blend_state_offset;
1236 (void)color_calc_state_offset;
1237 (void)depth_stencil_state_offset;
1238 #endif
1239
1240 blorp_emit(batch, GENX(3DSTATE_CONSTANT_VS), vs);
1241 #if GEN_GEN >= 7
1242 blorp_emit(batch, GENX(3DSTATE_CONSTANT_HS), hs);
1243 blorp_emit(batch, GENX(3DSTATE_CONSTANT_DS), DS);
1244 #endif
1245 blorp_emit(batch, GENX(3DSTATE_CONSTANT_GS), gs);
1246 blorp_emit(batch, GENX(3DSTATE_CONSTANT_PS), ps);
1247
1248 if (params->src.enabled)
1249 blorp_emit_sampler_state(batch);
1250
1251 blorp_emit_3dstate_multisample(batch, params);
1252
1253 blorp_emit(batch, GENX(3DSTATE_SAMPLE_MASK), mask) {
1254 mask.SampleMask = (1 << params->num_samples) - 1;
1255 }
1256
1257 /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
1258 * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
1259 *
1260 * [DevSNB] A pipeline flush must be programmed prior to a
1261 * 3DSTATE_VS command that causes the VS Function Enable to
1262 * toggle. Pipeline flush can be executed by sending a PIPE_CONTROL
1263 * command with CS stall bit set and a post sync operation.
1264 *
1265 * We've already done one at the start of the BLORP operation.
1266 */
1267 blorp_emit_vs_config(batch, params);
1268 #if GEN_GEN >= 7
1269 blorp_emit(batch, GENX(3DSTATE_HS), hs);
1270 blorp_emit(batch, GENX(3DSTATE_TE), te);
1271 blorp_emit(batch, GENX(3DSTATE_DS), DS);
1272 blorp_emit(batch, GENX(3DSTATE_STREAMOUT), so);
1273 #endif
1274 blorp_emit(batch, GENX(3DSTATE_GS), gs);
1275
1276 blorp_emit(batch, GENX(3DSTATE_CLIP), clip) {
1277 clip.PerspectiveDivideDisable = true;
1278 }
1279
1280 blorp_emit_sf_config(batch, params);
1281 blorp_emit_ps_config(batch, params);
1282
1283 blorp_emit_cc_viewport(batch);
1284 }
1285
1286 /******** This is the end of the pipeline setup code ********/
1287
1288 #endif /* GEN_GEN >= 6 */
1289
1290 #if GEN_GEN >= 7
1291 static void
1292 blorp_emit_memcpy(struct blorp_batch *batch,
1293 struct blorp_address dst,
1294 struct blorp_address src,
1295 uint32_t size)
1296 {
1297 assert(size % 4 == 0);
1298
1299 for (unsigned dw = 0; dw < size; dw += 4) {
1300 #if GEN_GEN >= 8
1301 blorp_emit(batch, GENX(MI_COPY_MEM_MEM), cp) {
1302 cp.DestinationMemoryAddress = dst;
1303 cp.SourceMemoryAddress = src;
1304 }
1305 #else
1306 /* IVB does not have a general purpose register for command streamer
1307 * commands. Therefore, we use an alternate temporary register.
1308 */
1309 #define BLORP_TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
1310 blorp_emit(batch, GENX(MI_LOAD_REGISTER_MEM), load) {
1311 load.RegisterAddress = BLORP_TEMP_REG;
1312 load.MemoryAddress = src;
1313 }
1314 blorp_emit(batch, GENX(MI_STORE_REGISTER_MEM), store) {
1315 store.RegisterAddress = BLORP_TEMP_REG;
1316 store.MemoryAddress = dst;
1317 }
1318 #undef BLORP_TEMP_REG
1319 #endif
1320 dst.offset += 4;
1321 src.offset += 4;
1322 }
1323 }
1324 #endif
1325
1326 static void
1327 blorp_emit_surface_state(struct blorp_batch *batch,
1328 const struct brw_blorp_surface_info *surface,
1329 enum isl_aux_op op,
1330 void *state, uint32_t state_offset,
1331 const bool color_write_disables[4],
1332 bool is_render_target)
1333 {
1334 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1335 struct isl_surf surf = surface->surf;
1336
1337 if (surf.dim == ISL_SURF_DIM_1D &&
1338 surf.dim_layout == ISL_DIM_LAYOUT_GEN4_2D) {
1339 assert(surf.logical_level0_px.height == 1);
1340 surf.dim = ISL_SURF_DIM_2D;
1341 }
1342
1343 /* Blorp doesn't support HiZ in any of the blit or slow-clear paths */
1344 enum isl_aux_usage aux_usage = surface->aux_usage;
1345 if (aux_usage == ISL_AUX_USAGE_HIZ)
1346 aux_usage = ISL_AUX_USAGE_NONE;
1347
1348 isl_channel_mask_t write_disable_mask = 0;
1349 if (is_render_target && GEN_GEN <= 5) {
1350 if (color_write_disables[0])
1351 write_disable_mask |= ISL_CHANNEL_RED_BIT;
1352 if (color_write_disables[1])
1353 write_disable_mask |= ISL_CHANNEL_GREEN_BIT;
1354 if (color_write_disables[2])
1355 write_disable_mask |= ISL_CHANNEL_BLUE_BIT;
1356 if (color_write_disables[3])
1357 write_disable_mask |= ISL_CHANNEL_ALPHA_BIT;
1358 }
1359
1360 const bool use_clear_address =
1361 GEN_GEN >= 10 && (surface->clear_color_addr.buffer != NULL);
1362
1363 isl_surf_fill_state(batch->blorp->isl_dev, state,
1364 .surf = &surf, .view = &surface->view,
1365 .aux_surf = &surface->aux_surf, .aux_usage = aux_usage,
1366 .mocs = surface->addr.mocs,
1367 .clear_color = surface->clear_color,
1368 .use_clear_address = use_clear_address,
1369 .write_disables = write_disable_mask);
1370
1371 blorp_surface_reloc(batch, state_offset + isl_dev->ss.addr_offset,
1372 surface->addr, 0);
1373
1374 if (aux_usage != ISL_AUX_USAGE_NONE) {
1375 /* On gen7 and prior, the bottom 12 bits of the MCS base address are
1376 * used to store other information. This should be ok, however, because
1377 * surface buffer addresses are always 4K page aligned.
1378 */
1379 assert((surface->aux_addr.offset & 0xfff) == 0);
1380 uint32_t *aux_addr = state + isl_dev->ss.aux_addr_offset;
1381 blorp_surface_reloc(batch, state_offset + isl_dev->ss.aux_addr_offset,
1382 surface->aux_addr, *aux_addr);
1383 }
1384
1385 if (surface->clear_color_addr.buffer) {
1386 #if GEN_GEN >= 10
1387 assert((surface->clear_color_addr.offset & 0x3f) == 0);
1388 uint32_t *clear_addr = state + isl_dev->ss.clear_color_state_offset;
1389 blorp_surface_reloc(batch, state_offset +
1390 isl_dev->ss.clear_color_state_offset,
1391 surface->clear_color_addr, *clear_addr);
1392 #elif GEN_GEN >= 7
1393 if (op == ISL_AUX_OP_FULL_RESOLVE || op == ISL_AUX_OP_PARTIAL_RESOLVE) {
1394 struct blorp_address dst_addr = blorp_get_surface_base_address(batch);
1395 dst_addr.offset += state_offset + isl_dev->ss.clear_value_offset;
1396 blorp_emit_memcpy(batch, dst_addr, surface->clear_color_addr,
1397 isl_dev->ss.clear_value_size);
1398 }
1399 #else
1400 unreachable("Fast clears are only supported on gen7+");
1401 #endif
1402 }
1403
1404 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1405 }
1406
1407 static void
1408 blorp_emit_null_surface_state(struct blorp_batch *batch,
1409 const struct brw_blorp_surface_info *surface,
1410 uint32_t *state)
1411 {
1412 struct GENX(RENDER_SURFACE_STATE) ss = {
1413 .SurfaceType = SURFTYPE_NULL,
1414 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
1415 .Width = surface->surf.logical_level0_px.width - 1,
1416 .Height = surface->surf.logical_level0_px.height - 1,
1417 .MIPCountLOD = surface->view.base_level,
1418 .MinimumArrayElement = surface->view.base_array_layer,
1419 .Depth = surface->view.array_len - 1,
1420 .RenderTargetViewExtent = surface->view.array_len - 1,
1421 #if GEN_GEN >= 6
1422 .NumberofMultisamples = ffs(surface->surf.samples) - 1,
1423 #endif
1424
1425 #if GEN_GEN >= 7
1426 .SurfaceArray = surface->surf.dim != ISL_SURF_DIM_3D,
1427 #endif
1428
1429 #if GEN_GEN >= 8
1430 .TileMode = YMAJOR,
1431 #else
1432 .TiledSurface = true,
1433 #endif
1434 };
1435
1436 GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &ss);
1437
1438 blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
1439 }
1440
1441 static void
1442 blorp_emit_surface_states(struct blorp_batch *batch,
1443 const struct blorp_params *params)
1444 {
1445 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1446 uint32_t bind_offset = 0, surface_offsets[2];
1447 void *surface_maps[2];
1448
1449 MAYBE_UNUSED bool has_indirect_clear_color = false;
1450 if (params->use_pre_baked_binding_table) {
1451 bind_offset = params->pre_baked_binding_table_offset;
1452 } else {
1453 unsigned num_surfaces = 1 + params->src.enabled;
1454 blorp_alloc_binding_table(batch, num_surfaces,
1455 isl_dev->ss.size, isl_dev->ss.align,
1456 &bind_offset, surface_offsets, surface_maps);
1457
1458 if (params->dst.enabled) {
1459 blorp_emit_surface_state(batch, &params->dst,
1460 params->fast_clear_op,
1461 surface_maps[BLORP_RENDERBUFFER_BT_INDEX],
1462 surface_offsets[BLORP_RENDERBUFFER_BT_INDEX],
1463 params->color_write_disable, true);
1464 if (params->dst.clear_color_addr.buffer != NULL)
1465 has_indirect_clear_color = true;
1466 } else {
1467 assert(params->depth.enabled || params->stencil.enabled);
1468 const struct brw_blorp_surface_info *surface =
1469 params->depth.enabled ? &params->depth : &params->stencil;
1470 blorp_emit_null_surface_state(batch, surface,
1471 surface_maps[BLORP_RENDERBUFFER_BT_INDEX]);
1472 }
1473
1474 if (params->src.enabled) {
1475 blorp_emit_surface_state(batch, &params->src,
1476 params->fast_clear_op,
1477 surface_maps[BLORP_TEXTURE_BT_INDEX],
1478 surface_offsets[BLORP_TEXTURE_BT_INDEX],
1479 NULL, false);
1480 if (params->src.clear_color_addr.buffer != NULL)
1481 has_indirect_clear_color = true;
1482 }
1483 }
1484
1485 #if GEN_GEN >= 7
1486 if (has_indirect_clear_color) {
1487 /* Updating a surface state object may require that the state cache be
1488 * invalidated. From the SKL PRM, Shared Functions -> State -> State
1489 * Caching:
1490 *
1491 * Whenever the RENDER_SURFACE_STATE object in memory pointed to by
1492 * the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
1493 * modified [...], the L1 state cache must be invalidated to ensure
1494 * the new surface or sampler state is fetched from system memory.
1495 */
1496 blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
1497 pipe.StateCacheInvalidationEnable = true;
1498 }
1499 }
1500 #endif
1501
1502 #if GEN_GEN >= 7
1503 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), bt);
1504 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_HS), bt);
1505 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_DS), bt);
1506 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_GS), bt);
1507
1508 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_PS), bt) {
1509 bt.PointertoPSBindingTable = bind_offset;
1510 }
1511 #elif GEN_GEN >= 6
1512 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1513 bt.PSBindingTableChange = true;
1514 bt.PointertoPSBindingTable = bind_offset;
1515 }
1516 #else
1517 blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS), bt) {
1518 bt.PointertoPSBindingTable = bind_offset;
1519 }
1520 #endif
1521 }
1522
1523 static void
1524 blorp_emit_depth_stencil_config(struct blorp_batch *batch,
1525 const struct blorp_params *params)
1526 {
1527 const struct isl_device *isl_dev = batch->blorp->isl_dev;
1528
1529 uint32_t *dw = blorp_emit_dwords(batch, isl_dev->ds.size / 4);
1530 if (dw == NULL)
1531 return;
1532
1533 struct isl_depth_stencil_hiz_emit_info info = { };
1534
1535 if (params->depth.enabled) {
1536 info.view = &params->depth.view;
1537 info.mocs = params->depth.addr.mocs;
1538 } else if (params->stencil.enabled) {
1539 info.view = &params->stencil.view;
1540 info.mocs = params->stencil.addr.mocs;
1541 }
1542
1543 if (params->depth.enabled) {
1544 info.depth_surf = &params->depth.surf;
1545
1546 info.depth_address =
1547 blorp_emit_reloc(batch, dw + isl_dev->ds.depth_offset / 4,
1548 params->depth.addr, 0);
1549
1550 info.hiz_usage = params->depth.aux_usage;
1551 if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
1552 info.hiz_surf = &params->depth.aux_surf;
1553
1554 struct blorp_address hiz_address = params->depth.aux_addr;
1555 #if GEN_GEN == 6
1556 /* Sandy Bridge hardware does not technically support mipmapped HiZ.
1557 * However, we have a special layout that allows us to make it work
1558 * anyway by manually offsetting to the specified miplevel.
1559 */
1560 assert(info.hiz_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1561 uint32_t offset_B;
1562 isl_surf_get_image_offset_B_tile_sa(info.hiz_surf,
1563 info.view->base_level, 0, 0,
1564 &offset_B, NULL, NULL);
1565 hiz_address.offset += offset_B;
1566 #endif
1567
1568 info.hiz_address =
1569 blorp_emit_reloc(batch, dw + isl_dev->ds.hiz_offset / 4,
1570 hiz_address, 0);
1571
1572 info.depth_clear_value = params->depth.clear_color.f32[0];
1573 }
1574 }
1575
1576 if (params->stencil.enabled) {
1577 info.stencil_surf = &params->stencil.surf;
1578
1579 struct blorp_address stencil_address = params->stencil.addr;
1580 #if GEN_GEN == 6
1581 /* Sandy Bridge hardware does not technically support mipmapped stencil.
1582 * However, we have a special layout that allows us to make it work
1583 * anyway by manually offsetting to the specified miplevel.
1584 */
1585 assert(info.stencil_surf->dim_layout == ISL_DIM_LAYOUT_GEN6_STENCIL_HIZ);
1586 uint32_t offset_B;
1587 isl_surf_get_image_offset_B_tile_sa(info.stencil_surf,
1588 info.view->base_level, 0, 0,
1589 &offset_B, NULL, NULL);
1590 stencil_address.offset += offset_B;
1591 #endif
1592
1593 info.stencil_address =
1594 blorp_emit_reloc(batch, dw + isl_dev->ds.stencil_offset / 4,
1595 stencil_address, 0);
1596 }
1597
1598 isl_emit_depth_stencil_hiz_s(isl_dev, dw, &info);
1599 }
1600
1601 #if GEN_GEN >= 8
1602 /* Emits the Optimized HiZ sequence specified in the BDW+ PRMs. The
1603 * depth/stencil buffer extents are ignored to handle APIs which perform
1604 * clearing operations without such information.
1605 */
1606 static void
1607 blorp_emit_gen8_hiz_op(struct blorp_batch *batch,
1608 const struct blorp_params *params)
1609 {
1610 /* We should be performing an operation on a depth or stencil buffer.
1611 */
1612 assert(params->depth.enabled || params->stencil.enabled);
1613
1614 /* The stencil buffer should only be enabled if a fast clear operation is
1615 * requested.
1616 */
1617 if (params->stencil.enabled)
1618 assert(params->hiz_op == ISL_AUX_OP_FAST_CLEAR);
1619
1620 /* From the BDW PRM Volume 2, 3DSTATE_WM_HZ_OP:
1621 *
1622 * 3DSTATE_MULTISAMPLE packet must be used prior to this packet to change
1623 * the Number of Multisamples. This packet must not be used to change
1624 * Number of Multisamples in a rendering sequence.
1625 *
1626 * Since HIZ may be the first thing in a batch buffer, play safe and always
1627 * emit 3DSTATE_MULTISAMPLE.
1628 */
1629 blorp_emit_3dstate_multisample(batch, params);
1630
1631 /* From the BDW PRM Volume 7, Depth Buffer Clear:
1632 *
1633 * The clear value must be between the min and max depth values
1634 * (inclusive) defined in the CC_VIEWPORT. If the depth buffer format is
1635 * D32_FLOAT, then +/-DENORM values are also allowed.
1636 *
1637 * Set the bounds to match our hardware limits, [0.0, 1.0].
1638 */
1639 if (params->depth.enabled && params->hiz_op == ISL_AUX_OP_FAST_CLEAR) {
1640 assert(params->depth.clear_color.f32[0] >= 0.0f);
1641 assert(params->depth.clear_color.f32[0] <= 1.0f);
1642 blorp_emit_cc_viewport(batch);
1643 }
1644
1645 /* According to the SKL PRM formula for WM_INT::ThreadDispatchEnable, the
1646 * 3DSTATE_WM::ForceThreadDispatchEnable field can force WM thread dispatch
1647 * even when WM_HZ_OP is active. However, WM thread dispatch is normally
1648 * disabled for HiZ ops and it appears that force-enabling it can lead to
1649 * GPU hangs on at least Skylake. Since we don't know the current state of
1650 * the 3DSTATE_WM packet, just emit a dummy one prior to 3DSTATE_WM_HZ_OP.
1651 */
1652 blorp_emit(batch, GENX(3DSTATE_WM), wm);
1653
1654 /* If we can't alter the depth stencil config and multiple layers are
1655 * involved, the HiZ op will fail. This is because the op requires that a
1656 * new config is emitted for each additional layer.
1657 */
1658 if (batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL) {
1659 assert(params->num_layers <= 1);
1660 } else {
1661 blorp_emit_depth_stencil_config(batch, params);
1662 }
1663
1664 blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp) {
1665 switch (params->hiz_op) {
1666 case ISL_AUX_OP_FAST_CLEAR:
1667 hzp.StencilBufferClearEnable = params->stencil.enabled;
1668 hzp.DepthBufferClearEnable = params->depth.enabled;
1669 hzp.StencilClearValue = params->stencil_ref;
1670 hzp.FullSurfaceDepthandStencilClear = params->full_surface_hiz_op;
1671 break;
1672 case ISL_AUX_OP_FULL_RESOLVE:
1673 assert(params->full_surface_hiz_op);
1674 hzp.DepthBufferResolveEnable = true;
1675 break;
1676 case ISL_AUX_OP_AMBIGUATE:
1677 assert(params->full_surface_hiz_op);
1678 hzp.HierarchicalDepthBufferResolveEnable = true;
1679 break;
1680 case ISL_AUX_OP_PARTIAL_RESOLVE:
1681 case ISL_AUX_OP_NONE:
1682 unreachable("Invalid HIZ op");
1683 }
1684
1685 hzp.NumberofMultisamples = ffs(params->num_samples) - 1;
1686 hzp.SampleMask = 0xFFFF;
1687
1688 /* Due to a hardware issue, this bit must be zero (MBZ) */
1689 assert(hzp.ScissorRectangleEnable == false);
1690
1691 /* Contrary to the HW docs both fields are inclusive */
1692 hzp.ClearRectangleXMin = params->x0;
1693 hzp.ClearRectangleYMin = params->y0;
1694
1695 /* Contrary to the HW docs both fields are exclusive */
1696 hzp.ClearRectangleXMax = params->x1;
1697 hzp.ClearRectangleYMax = params->y1;
1698 }
1699
1700 /* A PIPE_CONTROL with all bits clear except for “Post-Sync Operation”, which
1701 * must be set to “Write Immediate Data”, is required here.
1702 */
1703 blorp_emit(batch, GENX(PIPE_CONTROL), pc) {
1704 pc.PostSyncOperation = WriteImmediateData;
1705 pc.Address = blorp_get_workaround_page(batch);
1706 }
1707
1708 blorp_emit(batch, GENX(3DSTATE_WM_HZ_OP), hzp);
1709 }
1710 #endif
1711
1712 static void
1713 blorp_update_clear_color(struct blorp_batch *batch,
1714 const struct brw_blorp_surface_info *info,
1715 enum isl_aux_op op)
1716 {
1717 if (info->clear_color_addr.buffer && op == ISL_AUX_OP_FAST_CLEAR) {
1718 #if GEN_GEN >= 9
1719 for (int i = 0; i < 4; i++) {
1720 blorp_emit(batch, GENX(MI_STORE_DATA_IMM), sdi) {
1721 sdi.Address = info->clear_color_addr;
1722 sdi.Address.offset += i * 4;
1723 sdi.ImmediateData = info->clear_color.u32[i];
1724 }
1725 }
1726 #elif GEN_GEN >= 7
1727 blorp_emit(batch, GENX(MI_STORE_DATA_IMM), sdi) {
1728 sdi.Address = info->clear_color_addr;
1729 sdi.ImmediateData = ISL_CHANNEL_SELECT_RED << 25 |
1730 ISL_CHANNEL_SELECT_GREEN << 22 |
1731 ISL_CHANNEL_SELECT_BLUE << 19 |
1732 ISL_CHANNEL_SELECT_ALPHA << 16;
1733 if (isl_format_has_int_channel(info->view.format)) {
1734 for (unsigned i = 0; i < 4; i++) {
1735 assert(info->clear_color.u32[i] == 0 ||
1736 info->clear_color.u32[i] == 1);
1737 }
1738 sdi.ImmediateData |= (info->clear_color.u32[0] != 0) << 31;
1739 sdi.ImmediateData |= (info->clear_color.u32[1] != 0) << 30;
1740 sdi.ImmediateData |= (info->clear_color.u32[2] != 0) << 29;
1741 sdi.ImmediateData |= (info->clear_color.u32[3] != 0) << 28;
1742 } else {
1743 for (unsigned i = 0; i < 4; i++) {
1744 assert(info->clear_color.f32[i] == 0.0f ||
1745 info->clear_color.f32[i] == 1.0f);
1746 }
1747 sdi.ImmediateData |= (info->clear_color.f32[0] != 0.0f) << 31;
1748 sdi.ImmediateData |= (info->clear_color.f32[1] != 0.0f) << 30;
1749 sdi.ImmediateData |= (info->clear_color.f32[2] != 0.0f) << 29;
1750 sdi.ImmediateData |= (info->clear_color.f32[3] != 0.0f) << 28;
1751 }
1752 }
1753 #endif
1754 }
1755 }
1756
1757 /**
1758 * \brief Execute a blit or render pass operation.
1759 *
1760 * To execute the operation, this function manually constructs and emits a
1761 * batch to draw a rectangle primitive. The batchbuffer is flushed before
1762 * constructing and after emitting the batch.
1763 *
1764 * This function alters no GL state.
1765 */
1766 static void
1767 blorp_exec(struct blorp_batch *batch, const struct blorp_params *params)
1768 {
1769 if (!(batch->flags & BLORP_BATCH_NO_UPDATE_CLEAR_COLOR)) {
1770 blorp_update_clear_color(batch, &params->dst, params->fast_clear_op);
1771 blorp_update_clear_color(batch, &params->depth, params->hiz_op);
1772 }
1773
1774 #if GEN_GEN >= 8
1775 if (params->hiz_op != ISL_AUX_OP_NONE) {
1776 blorp_emit_gen8_hiz_op(batch, params);
1777 return;
1778 }
1779 #endif
1780
1781 blorp_emit_vertex_buffers(batch, params);
1782 blorp_emit_vertex_elements(batch, params);
1783
1784 blorp_emit_pipeline(batch, params);
1785
1786 blorp_emit_surface_states(batch, params);
1787
1788 if (!(batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL))
1789 blorp_emit_depth_stencil_config(batch, params);
1790
1791 blorp_emit(batch, GENX(3DPRIMITIVE), prim) {
1792 prim.VertexAccessType = SEQUENTIAL;
1793 prim.PrimitiveTopologyType = _3DPRIM_RECTLIST;
1794 #if GEN_GEN >= 7
1795 prim.PredicateEnable = batch->flags & BLORP_BATCH_PREDICATE_ENABLE;
1796 #endif
1797 prim.VertexCountPerInstance = 3;
1798 prim.InstanceCount = params->num_layers;
1799 }
1800 }
1801
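/* A minimal sketch of the wrapper mentioned at the top of this file: the
 * includer typically exposes a per-gen, externally visible entry point that
 * simply forwards to the static blorp_exec() above (the exact name and any
 * extra setup are up to the driver):
 *
 *    void
 *    genX(blorp_exec)(struct blorp_batch *batch,
 *                     const struct blorp_params *params)
 *    {
 *       blorp_exec(batch, params);
 *    }
 */
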
1802 #endif /* BLORP_GENX_EXEC_H */