/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "dev/gen_device_info.h"
#include "common/gen_sample_positions.h"
#include "genxml/gen_macros.h"

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/state.h"

#include "brw_context.h"
#include "brw_multisample_state.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/glformats.h"
#include "main/samplerobj.h"
#include "main/shaderapi.h"
#include "main/stencil.h"
#include "main/transformfeedback.h"
#include "main/varray.h"
#include "main/viewport.h"
#include "util/half_float.h"
UNUSED static void *
emit_dwords(struct brw_context *brw, unsigned n)
{
   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}
#define __gen_address_type struct brw_address
#define __gen_user_data struct brw_context
static uint64_t
__gen_combine_address(struct brw_context *brw, void *location,
                      struct brw_address address, uint32_t delta)
{
   struct intel_batchbuffer *batch = &brw->batch;
   uint32_t offset;

   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      if (GEN_GEN < 6 && brw_ptr_in_state_buffer(batch, location)) {
         offset = (char *) location - (char *) brw->batch.state.map;
         return brw_state_reloc(batch, offset, address.bo,
                                address.offset + delta,
                                address.reloc_flags);
      }

      assert(!brw_ptr_in_state_buffer(batch, location));

      offset = (char *) location - (char *) brw->batch.batch.map;
      return brw_batch_reloc(batch, offset, address.bo,
                             address.offset + delta,
                             address.reloc_flags);
   }
}
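/* Helpers for building `struct brw_address` values.  The reloc_flags decide
 * how __gen_combine_address() above emits the relocation: RELOC_WRITE marks
 * the buffer as written by the GPU, RELOC_32BIT pins the buffer to the low
 * 4 GiB of the address space, and RELOC_NEEDS_GGTT forces a global GTT
 * binding.
 */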
UNUSED static struct brw_address
rw_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE,
   };
}

static struct brw_address
ro_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
   };
}

static struct brw_address
rw_32_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE | RELOC_32BIT,
   };
}

static struct brw_address
ro_32_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_32BIT,
   };
}

UNUSED static struct brw_address
ggtt_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE | RELOC_NEEDS_GGTT,
   };
}

#if GEN_GEN == 4
static struct brw_address
KSP(struct brw_context *brw, uint32_t offset)
{
   return ro_bo(brw->cache.bo, offset);
}
#else
static uint32_t
KSP(UNUSED struct brw_context *brw, uint32_t offset)
{
   return offset;
}
#endif
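/* Note on KSP() above: on Gen4, kernel start pointers are full relocated
 * graphics addresses into the program cache BO, so KSP() returns a
 * brw_address; on Gen5+ they are simple offsets from the Instruction Base
 * Address programmed by STATE_BASE_ADDRESS, so the offset is returned
 * unchanged.
 */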
#include "genxml/genX_pack.h"

#define _brw_cmd_length(cmd) cmd ## _length
#define _brw_cmd_length_bias(cmd) cmd ## _length_bias
#define _brw_cmd_header(cmd) cmd ## _header
#define _brw_cmd_pack(cmd) cmd ## _pack

#define brw_batch_emit(brw, cmd, name)                  \
   for (struct cmd name = { _brw_cmd_header(cmd) },     \
        *_dst = emit_dwords(brw, _brw_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);              \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),   \
        _dst = NULL)

#define brw_batch_emitn(brw, cmd, n, ...) ({           \
      uint32_t *_dw = emit_dwords(brw, n);             \
      struct cmd template = {                          \
         _brw_cmd_header(cmd),                         \
         .DWordLength = n - _brw_cmd_length_bias(cmd), \
         __VA_ARGS__                                   \
      };                                               \
      _brw_cmd_pack(cmd)(brw, _dw, &template);         \
      _dw + 1; /* Array starts at dw[1] */             \
   })

#define brw_state_emit(brw, cmd, align, offset, name)          \
   for (struct cmd name = {},                                  \
        *_dst = brw_state_batch(brw, _brw_cmd_length(cmd) * 4, \
                                align, offset);                \
        __builtin_expect(_dst != NULL, 1);                     \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),          \
        _dst = NULL)
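/* Usage sketch (not part of the upstream file): brw_batch_emit() expands to
 * a for-loop that runs its body exactly once.  The loop header reserves the
 * packet's DWords and initializes `name` with the packet header; the body
 * fills in fields by plain assignment; and the loop's increment expression
 * packs the struct into the reserved space and NULLs _dst to terminate:
 *
 *    brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
 *       rect.ClippedDrawingRectangleXMax = fb_width - 1;
 *    }
 */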
/**
 * Polygon stipple packet
 */
static void
genX(upload_polygon_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
      /* Polygon stipple is provided in OpenGL order, i.e. bottom
       * row first.  If we're rendering to a window (i.e. the
       * default frame buffer object, 0), then we need to invert
       * it to match our pixel layout.  But if we're rendering
       * to an FBO (i.e. any named frame buffer object), we *don't*
       * need to invert - we already match the layout.
       */
      if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[31 - i]; /* invert */
      } else {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[i];
      }
   }
}

static const struct brw_tracked_state genX(polygon_stipple) = {
   .dirty = {
      .mesa = _NEW_POLYGON |
              _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple),
};
/**
 * Polygon stipple offset packet
 */
static void
genX(upload_polygon_stipple_offset)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_OFFSET), poly) {
      /* _NEW_BUFFERS
       *
       * If we're drawing to a system window we have to invert the Y axis
       * in order to match the OpenGL pixel coordinate system, and our
       * offset must be matched to the window position.  If we're drawing
       * to a user-created FBO then our native pixel coordinate system
       * works just fine, and there's no window system to worry about.
       */
      if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
         poly.PolygonStippleYOffset =
            (32 - (_mesa_geometric_height(ctx->DrawBuffer) & 31)) & 31;
      }
   }
}

static const struct brw_tracked_state genX(polygon_stipple_offset) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_POLYGON,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple_offset),
};
/**
 * Line stipple packet
 */
static void
genX(upload_line_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_LINE_STIPPLE), line) {
      line.LineStipplePattern = ctx->Line.StipplePattern;

      line.LineStippleInverseRepeatCount = 1.0f / ctx->Line.StippleFactor;
      line.LineStippleRepeatCount = ctx->Line.StippleFactor;
   }
}

static const struct brw_tracked_state genX(line_stipple) = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_line_stipple),
};
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void
genX(upload_drawing_rect)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   const unsigned int fb_width = _mesa_geometric_width(fb);
   const unsigned int fb_height = _mesa_geometric_height(fb);

   brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = fb_width - 1;
      rect.ClippedDrawingRectangleYMax = fb_height - 1;
   }
}

static const struct brw_tracked_state genX(drawing_rect) = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_drawing_rect),
};
static uint32_t *
genX(emit_vertex_buffer_state)(struct brw_context *brw,
                               uint32_t *dw,
                               unsigned buffer_nr,
                               struct brw_bo *bo,
                               unsigned start_offset,
                               MAYBE_UNUSED unsigned end_offset,
                               unsigned stride,
                               MAYBE_UNUSED unsigned step_rate)
{
   struct GENX(VERTEX_BUFFER_STATE) buf_state = {
      .VertexBufferIndex = buffer_nr,
      .BufferPitch = stride,

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two vertex buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict vertex buffers to the low 32 bits of
       * the address space.
       */
      .BufferStartingAddress = ro_32_bo(bo, start_offset),
#if GEN_GEN >= 8
      .BufferSize = end_offset - start_offset,
#endif

#if GEN_GEN >= 7
      .AddressModifyEnable = true,
#endif

#if GEN_GEN < 8
      .BufferAccessType = step_rate ? INSTANCEDATA : VERTEXDATA,
      .InstanceDataStepRate = step_rate,
#if GEN_GEN >= 5
      .EndAddress = ro_bo(bo, end_offset - 1),
#endif
#endif

#if GEN_GEN == 11
      .VertexBufferMOCS = ICL_MOCS_WB,
#elif GEN_GEN == 10
      .VertexBufferMOCS = CNL_MOCS_WB,
#elif GEN_GEN == 9
      .VertexBufferMOCS = SKL_MOCS_WB,
#elif GEN_GEN == 8
      .VertexBufferMOCS = BDW_MOCS_WB,
#elif GEN_GEN == 7
      .VertexBufferMOCS = GEN7_MOCS_L3,
#endif
   };

   GENX(VERTEX_BUFFER_STATE_pack)(brw, dw, &buf_state);
   return dw + GENX(VERTEX_BUFFER_STATE_length);
}
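/* Worked example of the VF cache hazard described above (illustrative, not
 * from the upstream file): the cache tags only bits [31:0] of an address,
 * so two buffers placed exactly 1ULL << 32 bytes (4 GiB) apart produce
 * identical tags and alias in the cache.  Using ro_32_bo() sets RELOC_32BIT,
 * so the kernel never places vertex buffers above 4 GiB in the first place.
 */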
static bool
is_passthru_format(uint32_t format)
{
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return true;
   default:
      return false;
   }
}

UNUSED static int
uploads_needed(uint32_t format,
               bool is_dual_slot)
{
   if (!is_passthru_format(format))
      return 1;

   if (is_dual_slot)
      return 2;

   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
      return 1;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return 2;
   default:
      unreachable("not reached");
   }
}
/*
 * Returns the format that we are finally going to use when uploading a
 * vertex element.  It will only change if we are using *64*PASSTHRU formats,
 * as for gen < 8 they need to be split into two *32*FLOAT formats.
 *
 * @upload points in which upload we are.  Valid values are [0,1]
 */
static uint32_t
downsize_format_if_needed(uint32_t format,
                          int upload)
{
   assert(upload == 0 || upload == 1);

   if (!is_passthru_format(format))
      return format;

   /* ISL_FORMAT_R64_PASSTHRU and ISL_FORMAT_R64G64_PASSTHRU with an upload ==
    * 1 means that we have been forced to do 2 uploads for a size <= 2.  This
    * happens with gen < 8 and dvec3 or dvec4 vertex shader input
    * variables.  In those cases, we return ISL_FORMAT_R32_FLOAT as a way of
    * flagging that we want to fill this second forced upload with zeroes.
    */
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32G32_FLOAT;
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return ISL_FORMAT_R32G32B32A32_FLOAT;
   default:
      unreachable("not reached");
   }
}
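/* Illustrative mapping (not in the upstream file): on gen < 8 a dvec3 input
 * (R64G64B64_PASSTHRU) becomes upload 0 = ISL_FORMAT_R32G32B32A32_FLOAT (the
 * first two doubles, seen as four 32-bit halves) and upload 1 =
 * ISL_FORMAT_R32G32_FLOAT (the third double), with each upload reading 16
 * bytes further into the buffer.  A plain double or dvec2 fits in one upload
 * and never reaches the second case.
 */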
/*
 * Returns the number of components associated with a format that is used on
 * a 64 to 32 format split.  See downsize_format_if_needed().
 */
static int
upload_format_size(uint32_t upload_format)
{
   switch (upload_format) {
   case ISL_FORMAT_R32_FLOAT:

      /* downsize_format_if_needed has returned this one in order to flag
       * that we are performing a second upload which we want to have filled
       * with zeroes.  This happens with gen < 8, a size <= 2, and dvec3 or
       * dvec4 vertex shader input variables.
       */

      return 0;
   case ISL_FORMAT_R32G32_FLOAT:
      return 2;
   case ISL_FORMAT_R32G32B32A32_FLOAT:
      return 4;
   default:
      unreachable("not reached");
   }
}
static void
genX(emit_vertices)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

#if GEN_GEN < 6
   brw_emit_query_begin(brw);
#endif

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

#if GEN_GEN >= 8
   struct gl_context *ctx = &brw->ctx;
   const bool uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
                                ctx->Polygon.BackMode != GL_FILL);

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      unsigned vue = brw->vb.nr_enabled;

      /* The element for the edge flags must always be last, so we have to
       * insert the SGVS before it in that case.
       */
      if (uses_edge_flag) {
         assert(vue > 0);
         vue--;
      }

      WARN_ONCE(vue >= 33,
                "Trying to insert VID/IID past 33rd vertex element, "
                "need to reorder the vertex attributes.");

      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs) {
         if (vs_prog_data->uses_vertexid) {
            vfs.VertexIDEnable = true;
            vfs.VertexIDComponentNumber = 2;
            vfs.VertexIDElementOffset = vue;
         }

         if (vs_prog_data->uses_instanceid) {
            vfs.InstanceIDEnable = true;
            vfs.InstanceIDComponentNumber = 3;
            vfs.InstanceIDElementOffset = vue;
         }
      }

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = true;
         vfi.VertexElementIndex = vue;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs);
   }
#endif
   const bool needs_sgvs_element = (vs_prog_data->uses_basevertex ||
                                    vs_prog_data->uses_baseinstance ||
                                    vs_prog_data->uses_instanceid ||
                                    vs_prog_data->uses_vertexid);

   unsigned nr_elements =
      brw->vb.nr_enabled + needs_sgvs_element + vs_prog_data->uses_drawid;

#if GEN_GEN < 8
   /* If any of the formats of vb.enabled needs more than one upload, we need
    * to add it to nr_elements
    */
   for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_vertex_array *glarray = input->glarray;
      const struct gl_array_attributes *glattrib = glarray->VertexAttrib;
      uint32_t format = brw_get_vertex_surface_type(brw, glattrib);

      if (uploads_needed(format, input->is_dual_slot) > 1)
         nr_elements++;
   }
#endif

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (nr_elements == 0) {
      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                           1 + GENX(VERTEX_ELEMENT_STATE_length));
      struct GENX(VERTEX_ELEMENT_STATE) elem = {
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_1_FP,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem);
      return;
   }
   /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
   const bool uses_draw_params =
      vs_prog_data->uses_basevertex ||
      vs_prog_data->uses_baseinstance;
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + vs_prog_data->uses_drawid;

   if (nr_buffers) {
      assert(nr_buffers <= (GEN_GEN >= 6 ? 33 : 17));

      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_BUFFERS),
                           1 + GENX(VERTEX_BUFFER_STATE_length) * nr_buffers);

      for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
         const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         /* Prior to Haswell and Bay Trail we have to use 4-component formats
          * to fake 3-component ones.  In particular, we do this for
          * half-float and 8 and 16-bit integer formats.  This means that the
          * vertex element may poke over the end of the buffer by 2 bytes.
          */
         const unsigned padding =
            (GEN_GEN <= 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail) * 2;
         const unsigned end = buffer->offset + buffer->size + padding;
         dw = genX(emit_vertex_buffer_state)(brw, dw, i, buffer->bo,
                                             buffer->offset,
                                             end,
                                             buffer->stride,
                                             buffer->step_rate);
      }

      if (uses_draw_params) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers,
                                             brw->draw.draw_params_bo,
                                             brw->draw.draw_params_offset,
                                             brw->draw.draw_params_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }

      if (vs_prog_data->uses_drawid) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers + 1,
                                             brw->draw.draw_id_bo,
                                             brw->draw.draw_id_offset,
                                             brw->draw.draw_id_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }
   }

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
#if GEN_GEN >= 6
   assert(nr_elements <= 34);
   const struct brw_vertex_element *gen6_edgeflag_input = NULL;
#else
   assert(nr_elements <= 18);
#endif
   dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                        1 + GENX(VERTEX_ELEMENT_STATE_length) * nr_elements);

   unsigned i;
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_vertex_array *glarray = input->glarray;
      const struct gl_array_attributes *glattrib = glarray->VertexAttrib;
      uint32_t format = brw_get_vertex_surface_type(brw, glattrib);
      uint32_t comp0 = VFCOMP_STORE_SRC;
      uint32_t comp1 = VFCOMP_STORE_SRC;
      uint32_t comp2 = VFCOMP_STORE_SRC;
      uint32_t comp3 = VFCOMP_STORE_SRC;
      const unsigned num_uploads = GEN_GEN < 8 ?
         uploads_needed(format, input->is_dual_slot) : 1;

#if GEN_GEN >= 8
      /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
       * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
       * element which has edge flag enabled."
       */
      assert(!(is_passthru_format(format) && uses_edge_flag));
#endif

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper.  Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte.  Just rewrite that to convert to a float.
       *
       * Gen6+ passes edgeflag as sideband along with the vertex, instead
       * of in the VUE.  We have to upload it sideband as the last vertex
       * element according to the B-Spec.
       */
#if GEN_GEN >= 6
      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         gen6_edgeflag_input = input;
         continue;
      }
#endif

      for (unsigned c = 0; c < num_uploads; c++) {
         const uint32_t upload_format = GEN_GEN >= 8 ? format :
            downsize_format_if_needed(format, c);
         /* If we need more than one upload, the offset stride would be 128
          * bits (16 bytes), as for previous uploads we are using the full
          * entry.
          */
         const unsigned offset = input->offset + c * 16;

         const struct gl_vertex_array *glarray = input->glarray;
         const struct gl_array_attributes *glattrib = glarray->VertexAttrib;
         const int size = (GEN_GEN < 8 && is_passthru_format(format)) ?
            upload_format_size(upload_format) : glattrib->Size;

         switch (size) {
         case 0: comp0 = VFCOMP_STORE_0; /* fallthrough */
         case 1: comp1 = VFCOMP_STORE_0; /* fallthrough */
         case 2: comp2 = VFCOMP_STORE_0; /* fallthrough */
         case 3:
            if (GEN_GEN >= 8 && glattrib->Doubles) {
               comp3 = VFCOMP_STORE_0;
            } else if (glattrib->Integer) {
               comp3 = VFCOMP_STORE_1_INT;
            } else {
               comp3 = VFCOMP_STORE_1_FP;
            }

            break;
         }

#if GEN_GEN >= 8
         /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
          *
          *     "When SourceElementFormat is set to one of the *64*_PASSTHRU
          *     formats, 64-bit components are stored in the URB without any
          *     conversion. In this case, vertex elements must be written as
          *     128 or 256 bits, with VFCOMP_STORE_0 being used to pad the
          *     output as required. E.g., if R64_PASSTHRU is used to copy a
          *     64-bit Red component into the URB, Component 1 must be
          *     specified as VFCOMP_STORE_0 (with Components 2,3 set to
          *     VFCOMP_NOSTORE) in order to output a 128-bit vertex element,
          *     or Components 1-3 must be specified as VFCOMP_STORE_0 in
          *     order to output a 256-bit vertex element. Likewise, use of
          *     R64G64B64_PASSTHRU requires Component 3 to be specified as
          *     VFCOMP_STORE_0 in order to output a 256-bit vertex element."
          */
         if (glattrib->Doubles && !input->is_dual_slot) {
            /* Store vertex elements which correspond to double and dvec2
             * vertex shader inputs as 128-bit vertex elements, instead of
             * 256-bits.
             */
            comp2 = VFCOMP_NOSTORE;
            comp3 = VFCOMP_NOSTORE;
         }
#endif

         struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
            .VertexBufferIndex = input->buffer,
            .Valid = true,
            .SourceElementFormat = upload_format,
            .SourceElementOffset = offset,
            .Component0Control = comp0,
            .Component1Control = comp1,
            .Component2Control = comp2,
            .Component3Control = comp3,
#if GEN_GEN < 5
            .DestinationElementOffset = i * 4,
#endif
         };

         GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
         dw += GENX(VERTEX_ELEMENT_STATE_length);
      }
   }
   if (needs_sgvs_element) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

#if GEN_GEN >= 8
      if (vs_prog_data->uses_basevertex ||
          vs_prog_data->uses_baseinstance) {
         elem_state.VertexBufferIndex = brw->vb.nr_buffers;
         elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }
#else
      elem_state.VertexBufferIndex = brw->vb.nr_buffers;
      elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
      if (vs_prog_data->uses_basevertex)
         elem_state.Component0Control = VFCOMP_STORE_SRC;

      if (vs_prog_data->uses_baseinstance)
         elem_state.Component1Control = VFCOMP_STORE_SRC;

      if (vs_prog_data->uses_vertexid)
         elem_state.Component2Control = VFCOMP_STORE_VID;

      if (vs_prog_data->uses_instanceid)
         elem_state.Component3Control = VFCOMP_STORE_IID;
#endif

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }

   if (vs_prog_data->uses_drawid) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = brw->vb.nr_buffers + 1,
         .SourceElementFormat = ISL_FORMAT_R32_UINT,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#if GEN_GEN >= 6
   if (gen6_edgeflag_input) {
      const struct gl_vertex_array *glarray = gen6_edgeflag_input->glarray;
      const struct gl_array_attributes *glattrib = glarray->VertexAttrib;
      const uint32_t format = brw_get_vertex_surface_type(brw, glattrib);

      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = gen6_edgeflag_input->buffer,
         .EdgeFlagEnable = true,
         .SourceElementFormat = format,
         .SourceElementOffset = gen6_edgeflag_input->offset,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#endif

#if GEN_GEN >= 8
   for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
      unsigned element_index;

      /* The edge flag element is reordered to be the last one in the code
       * above so we need to compensate for that in the element indices used
       * below.
       */
      if (input == gen6_edgeflag_input)
         element_index = nr_elements - 1;
      else
         element_index = j++;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element_index;
         vfi.InstancingEnable = buffer->step_rate != 0;
         vfi.InstanceDataStepRate = buffer->step_rate;
      }
   }

   if (vs_prog_data->uses_drawid) {
      const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element;
      }
   }
#endif
}
static const struct brw_tracked_state genX(vertices) = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(emit_vertices),
};
static void
genX(emit_index_buffer)(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if GEN_GEN < 8 && !GEN_IS_HASWELL
      ib.CutIndexEnable = brw->prim_restart.enable_cut_index;
#endif
      ib.IndexFormat = brw_get_index_type(index_buffer->index_size);

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two index buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict index buffers to the low 32 bits of
       * the address space.
       */
      ib.BufferStartingAddress = ro_32_bo(brw->ib.bo, 0);
#if GEN_GEN >= 8
      ib.IndexBufferMOCS = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      ib.BufferSize = brw->ib.size;
#else
      ib.BufferEndingAddress = ro_bo(brw->ib.bo, brw->ib.size - 1);
#endif
   }
}
static const struct brw_tracked_state genX(index_buffer) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(emit_index_buffer),
};
#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
genX(upload_cut_index)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_VF), vf) {
      if (ctx->Array._PrimitiveRestart && brw->ib.ib) {
         vf.IndexedDrawCutIndexEnable = true;
         vf.CutIndex = _mesa_primitive_restart_index(ctx, brw->ib.index_size);
      }
   }
}

const struct brw_tracked_state genX(cut_index) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM,
      .brw   = BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(upload_cut_index),
};
#endif
/**
 * Determine the appropriate attribute override value to store into the
 * 3DSTATE_SF structure for a given fragment shader attribute.  The attribute
 * override value contains two pieces of information: the location of the
 * attribute in the VUE (relative to urb_entry_read_offset, see below), and a
 * flag indicating whether to "swizzle" the attribute based on the direction
 * the triangle is facing.
 *
 * If an attribute is "swizzled", then the given VUE location is used for
 * front-facing triangles, and the VUE location that immediately follows is
 * used for back-facing triangles.  We use this to implement the mapping from
 * gl_FrontColor/gl_BackColor to gl_Color.
 *
 * urb_entry_read_offset is the offset into the VUE at which the SF unit is
 * being instructed to begin reading attribute data.  It can be set to a
 * nonzero value to prevent the SF unit from wasting time reading elements of
 * the VUE that are not needed by the fragment shader.  It is measured in
 * 256-bit increments.
 */
static void
genX(get_attr_override)(struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr,
                        const struct brw_vue_map *vue_map,
                        int urb_entry_read_offset, int fs_attr,
                        bool two_side_color, uint32_t *max_source_attr)
{
   /* Find the VUE slot for this attribute. */
   int slot = vue_map->varying_to_slot[fs_attr];

   /* Viewport and Layer are stored in the VUE header.  We need to override
    * them to zero if earlier stages didn't write them, as GL requires that
    * they read back as zero when not explicitly set.
    */
   if (fs_attr == VARYING_SLOT_VIEWPORT || fs_attr == VARYING_SLOT_LAYER) {
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideW = true;
      attr->ConstantSource = CONST_0000;

      if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
         attr->ComponentOverrideY = true;
      if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
         attr->ComponentOverrideZ = true;

      return;
   }

   /* If there was only a back color written but not front, use back
    * as the color instead of undefined.
    */
   if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
   if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

   if (slot == -1) {
      /* This attribute does not exist in the VUE--that means that the vertex
       * shader did not write to it.  This means that either:
       *
       * (a) This attribute is a texture coordinate, and it is going to be
       * replaced with point coordinates (as a consequence of a call to
       * glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)), so the
       * hardware will ignore whatever attribute override we supply.
       *
       * (b) This attribute is read by the fragment shader but not written by
       * the vertex shader, so its value is undefined.  Therefore the
       * attribute override we supply doesn't matter.
       *
       * (c) This attribute is gl_PrimitiveID, and it wasn't written by the
       * previous shader stage.
       *
       * Note that we don't have to worry about the cases where the attribute
       * is gl_PointCoord or is undergoing point sprite coordinate
       * replacement, because in those cases, this function isn't called.
       *
       * In case (c), we need to program the attribute overrides so that the
       * primitive ID will be stored in this slot.  In every other case, the
       * attribute override we supply doesn't matter.  So just go ahead and
       * program primitive ID in every case.
       */
      attr->ComponentOverrideW = true;
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideY = true;
      attr->ComponentOverrideZ = true;
      attr->ConstantSource = PRIM_ID;
      return;
   }

   /* Compute the location of the attribute relative to urb_entry_read_offset.
    * Each increment of urb_entry_read_offset represents a 256-bit value, so
    * it counts for two 128-bit VUE slots.
    */
   int source_attr = slot - 2 * urb_entry_read_offset;
   assert(source_attr >= 0 && source_attr < 32);

   /* If we are doing two-sided color, and the VUE slot following this one
    * represents a back-facing color, then we need to instruct the SF unit to
    * do back-facing swizzling.
    */
   bool swizzling = two_side_color &&
      ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
       (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1));

   /* Update max_source_attr.  If swizzling, the SF will read this slot + 1. */
   if (*max_source_attr < source_attr + swizzling)
      *max_source_attr = source_attr + swizzling;

   attr->SourceAttribute = source_attr;
   if (swizzling)
      attr->SwizzleSelect = INPUTATTR_FACING;
}
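/* Worked example (illustrative, not from the upstream file): suppose COL0
 * lives in VUE slot 4, BFC0 in slot 5, and urb_entry_read_offset == 1.
 * Then source_attr = 4 - 2*1 = 2, and with INPUTATTR_FACING set the SF unit
 * reads relative attribute 2 (COL0) for front-facing triangles and
 * attribute 3 (BFC0) for back-facing ones.
 */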
static void
genX(calculate_attr_overrides)(const struct brw_context *brw,
                               struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr_overrides,
                               uint32_t *point_sprite_enables,
                               uint32_t *urb_entry_read_length,
                               uint32_t *urb_entry_read_offset)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   uint32_t max_source_attr = 0;

   *point_sprite_enables = 0;

   int first_slot =
      brw_compute_first_urb_slot_required(fp->info.inputs_read,
                                          &brw->vue_map_geom_out);

   /* Each URB offset packs two varying slots */
   assert(first_slot % 2 == 0);
   *urb_entry_read_offset = first_slot / 2;

   /* From the Ivybridge PRM, Vol 2 Part 1, 3DSTATE_SBE,
    * description of dw10 Point Sprite Texture Coordinate Enable:
    *
    * "This field must be programmed to zero when non-point primitives
    * are rendered."
    *
    * The SandyBridge PRM doesn't explicitly say that point sprite enables
    * must be programmed to zero when rendering non-point primitives, but
    * the IvyBridge PRM does, and if we don't, we get garbage.
    *
    * This is not required on Haswell, as the hardware ignores this state
    * when drawing non-points -- although we do still need to be careful to
    * correctly set the attr overrides.
    *
    * _NEW_POLYGON
    * BRW_NEW_PRIMITIVE | BRW_NEW_GS_PROG_DATA | BRW_NEW_TES_PROG_DATA
    */
   bool drawing_points = brw_is_drawing_points(brw);

   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      if (input_index < 0)
         continue;

      /* _NEW_POINT */
      bool point_sprite = false;
      if (drawing_points) {
         if (point->PointSprite &&
             (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7) &&
             (point->CoordReplace & (1u << (attr - VARYING_SLOT_TEX0)))) {
            point_sprite = true;
         }

         if (attr == VARYING_SLOT_PNTC)
            point_sprite = true;

         if (point_sprite)
            *point_sprite_enables |= (1 << input_index);
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | _NEW_LIGHT | _NEW_PROGRAM */
      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attribute = { 0 };

      if (!point_sprite) {
         genX(get_attr_override)(&attribute,
                                 &brw->vue_map_geom_out,
                                 *urb_entry_read_offset, attr,
                                 _mesa_vertex_program_two_side_enabled(ctx),
                                 &max_source_attr);
      }

      /* The hardware can only do the overrides on 16 overrides at a
       * time, and the other up to 16 have to be lined up so that the
       * input index = the output index.  We'll need to do some
       * tweaking to make sure that's the case.
       */
      if (input_index < 16)
         attr_overrides[input_index] = attribute;
      else
         assert(attribute.SourceAttribute == input_index);
   }

   /* From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    * "This field should be set to the minimum length required to read the
    * maximum source attribute.  The maximum source attribute is indicated
    * by the maximum value of the enabled Attribute # Source Attribute if
    * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    * enable is not set.
    * read_length = ceiling((max_source_attr + 1) / 2)
    *
    * [errata] Corruption/Hang possible if length programmed larger than
    * recommended"
    *
    * Similar text exists for Ivy Bridge.
    */
   *urb_entry_read_length = DIV_ROUND_UP(max_source_attr + 1, 2);
}
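/* Worked example (illustrative): if the highest enabled source attribute is
 * a swizzled COL0 at attribute 4, the SF also reads BFC0 at attribute 5, so
 * max_source_attr == 5 and read_length = ceiling((5 + 1) / 2) = 3, i.e.
 * three 256-bit URB reads covering six 128-bit VUE slots.
 */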
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
typedef struct GENX(3DSTATE_WM_DEPTH_STENCIL) DEPTH_STENCIL_GENXML;
#elif GEN_GEN >= 6
typedef struct GENX(DEPTH_STENCIL_STATE) DEPTH_STENCIL_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) DEPTH_STENCIL_GENXML;
#endif
static inline void
set_depth_stencil_bits(struct brw_context *brw, DEPTH_STENCIL_GENXML *ds)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* _NEW_DEPTH */
   struct gl_depthbuffer_attrib *depth = &ctx->Depth;

   /* _NEW_STENCIL */
   struct gl_stencil_attrib *stencil = &ctx->Stencil;
   const int b = stencil->_BackFace;

   if (depth->Test && depth_irb) {
      ds->DepthTestEnable = true;
      ds->DepthBufferWriteEnable = brw_depth_writes_enabled(brw);
      ds->DepthTestFunction = intel_translate_compare_func(depth->Func);
   }

   if (brw->stencil_enabled) {
      ds->StencilTestEnable = true;
      ds->StencilWriteMask = stencil->WriteMask[0] & 0xff;
      ds->StencilTestMask = stencil->ValueMask[0] & 0xff;

      ds->StencilTestFunction =
         intel_translate_compare_func(stencil->Function[0]);
      ds->StencilFailOp =
         intel_translate_stencil_op(stencil->FailFunc[0]);
      ds->StencilPassDepthPassOp =
         intel_translate_stencil_op(stencil->ZPassFunc[0]);
      ds->StencilPassDepthFailOp =
         intel_translate_stencil_op(stencil->ZFailFunc[0]);

      ds->StencilBufferWriteEnable = brw->stencil_write_enabled;

      if (brw->stencil_two_sided) {
         ds->DoubleSidedStencilEnable = true;
         ds->BackfaceStencilWriteMask = stencil->WriteMask[b] & 0xff;
         ds->BackfaceStencilTestMask = stencil->ValueMask[b] & 0xff;

         ds->BackfaceStencilTestFunction =
            intel_translate_compare_func(stencil->Function[b]);
         ds->BackfaceStencilFailOp =
            intel_translate_stencil_op(stencil->FailFunc[b]);
         ds->BackfaceStencilPassDepthPassOp =
            intel_translate_stencil_op(stencil->ZPassFunc[b]);
         ds->BackfaceStencilPassDepthFailOp =
            intel_translate_stencil_op(stencil->ZFailFunc[b]);
      }

#if GEN_GEN <= 5 || GEN_GEN >= 9
      ds->StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      ds->BackfaceStencilReferenceValue = _mesa_get_stencil_ref(ctx, b);
#endif
   }
}
#if GEN_GEN >= 6
static void
genX(upload_depth_stencil_state)(struct brw_context *brw)
{
#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_WM_DEPTH_STENCIL), wmds) {
      set_depth_stencil_bits(brw, &wmds);
   }
#else
   uint32_t ds_offset;
   brw_state_emit(brw, GENX(DEPTH_STENCIL_STATE), 64, &ds_offset, ds) {
      set_depth_stencil_bits(brw, &ds);
   }

   /* Now upload a pointer to the indirect state */
#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
      ptr.DEPTH_STENCIL_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
   }
#endif
#endif
}
static const struct brw_tracked_state genX(depth_stencil_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_DEPTH |
              _NEW_STENCIL,
      .brw  = BRW_NEW_BLORP |
              (GEN_GEN >= 8 ? BRW_NEW_CONTEXT
                            : BRW_NEW_BATCH |
                              BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_depth_stencil_state),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN <= 5
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(CLIP_STATE), 32, &brw->clip.state_offset, clip) {
      clip.KernelStartPointer = KSP(brw, brw->clip.prog_offset);
      clip.GRFRegisterCount =
         DIV_ROUND_UP(brw->clip.prog_data->total_grf, 16) - 1;
      clip.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      clip.SingleProgramFlow = true;
      clip.VertexURBEntryReadLength = brw->clip.prog_data->urb_read_length;
      clip.ConstantURBEntryReadLength = brw->clip.prog_data->curb_read_length;

      /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
      clip.ConstantURBEntryReadOffset = brw->curbe.clip_start * 2;
      clip.DispatchGRFStartRegisterForURBData = 1;
      clip.VertexURBEntryReadOffset = 0;

      /* BRW_NEW_URB_FENCE */
      clip.NumberofURBEntries = brw->urb.nr_clip_entries;
      clip.URBEntryAllocationSize = brw->urb.vsize - 1;

      if (brw->urb.nr_clip_entries >= 10) {
         /* Half of the URB entries go to each thread, and it has to be an
          * even number.
          */
         assert(brw->urb.nr_clip_entries % 2 == 0);

         /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
          * only 2 threads can output VUEs at a time.
          */
         clip.MaximumNumberofThreads = (GEN_GEN == 5 ? 16 : 2) - 1;
      } else {
         assert(brw->urb.nr_clip_entries >= 5);
         clip.MaximumNumberofThreads = 1 - 1;
      }

      clip.VertexPositionSpace = VPOS_NDCSPACE;
      clip.UserClipFlagsMustClipEnable = true;
      clip.GuardbandClipTestEnable = true;

      clip.ClipperViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->clip.vp_offset);

      clip.ScreenSpaceViewportXMin = -1;
      clip.ScreenSpaceViewportXMax = 1;
      clip.ScreenSpaceViewportYMin = -1;
      clip.ScreenSpaceViewportYMax = 1;

      clip.ViewportXYClipTestEnable = true;
      clip.ViewportZClipTestEnable = !ctx->Transform.DepthClamp;

      /* _NEW_TRANSFORM */
      if (GEN_GEN == 5 || GEN_IS_G4X) {
         clip.UserClipDistanceClipTestEnableBitmask =
            ctx->Transform.ClipPlanesEnabled;
      } else {
         /* Up to 6 actual clip flags, plus the 7th for the negative RHW
          * workaround.
          */
         clip.UserClipDistanceClipTestEnableBitmask =
            (ctx->Transform.ClipPlanesEnabled & 0x3f) | 0x40;
      }

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      clip.ClipMode = brw->clip.prog_data->clip_mode;

#if GEN_IS_G4X
      clip.NegativeWClipTestEnable = true;
#endif
   }
}

const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM |
               _NEW_VIEWPORT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CLIP_PROG_DATA |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_PROGRAM_CACHE |
               BRW_NEW_URB_FENCE,
   },
   .emit = genX(upload_clip_state),
};
#else
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_CLIP), clip) {
      clip.StatisticsEnable = !brw->meta_in_progress;

      if (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
         clip.NonPerspectiveBarycentricEnable = true;

#if GEN_GEN >= 7
      clip.EarlyCullEnable = true;
#endif

#if GEN_GEN == 7
      clip.FrontWinding = brw->polygon_front_bit == _mesa_is_user_fbo(fb);

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            clip.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            clip.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            clip.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("Should not get here: invalid CullFlag");
         }
      } else {
         clip.CullMode = CULLMODE_NONE;
      }
#endif

#if GEN_GEN < 8
      clip.UserClipDistanceCullTestEnableBitmask =
         brw_vue_prog_data(brw->vs.base.prog_data)->cull_distance_mask;

      clip.ViewportZClipTestEnable = !ctx->Transform.DepthClamp;
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex == GL_FIRST_VERTEX_CONVENTION) {
         clip.TriangleStripListProvokingVertexSelect = 0;
         clip.TriangleFanProvokingVertexSelect = 1;
         clip.LineStripListProvokingVertexSelect = 0;
      } else {
         clip.TriangleStripListProvokingVertexSelect = 2;
         clip.TriangleFanProvokingVertexSelect = 2;
         clip.LineStripListProvokingVertexSelect = 1;
      }

      /* _NEW_TRANSFORM */
      clip.UserClipDistanceClipTestEnableBitmask =
         ctx->Transform.ClipPlanesEnabled;

#if GEN_GEN >= 8
      clip.ForceUserClipDistanceClipTestEnableBitmask = true;
#endif

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      /* BRW_NEW_VIEWPORT_COUNT */
      const unsigned viewport_count = brw->clip.viewport_count;

      if (ctx->RasterDiscard) {
         clip.ClipMode = CLIPMODE_REJECT_ALL;
#if GEN_GEN == 6
         perf_debug("Rasterizer discard is currently implemented via the "
                    "clipper; having the GS not write primitives would "
                    "likely be faster.\n");
#endif
      } else {
         clip.ClipMode = CLIPMODE_NORMAL;
      }

      clip.ClipEnable = true;

      /* _NEW_POLYGON,
       * BRW_NEW_GEOMETRY_PROGRAM | BRW_NEW_TES_PROG_DATA | BRW_NEW_PRIMITIVE
       */
      if (!brw_is_drawing_points(brw) && !brw_is_drawing_lines(brw))
         clip.ViewportXYClipTestEnable = true;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;
      clip.MaximumVPIndex = viewport_count - 1;
      if (_mesa_geometric_layers(fb) == 0)
         clip.ForceZeroRTAIndexEnable = true;
   }
}

static const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_LIGHT |
              _NEW_POLYGON |
              _NEW_TRANSFORM,
      .brw  = BRW_NEW_BLORP |
              BRW_NEW_CONTEXT |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_GS_PROG_DATA |
              BRW_NEW_VS_PROG_DATA |
              BRW_NEW_META_IN_PROGRESS |
              BRW_NEW_PRIMITIVE |
              BRW_NEW_RASTERIZER_DISCARD |
              BRW_NEW_TES_PROG_DATA |
              BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_clip_state),
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_sf)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float point_size;

#if GEN_GEN <= 7
   /* _NEW_BUFFERS */
   bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   UNUSED const bool multisampled_fbo =
      _mesa_geometric_samples(ctx->DrawBuffer) > 1;
#endif

#if GEN_GEN < 6
   const struct brw_sf_prog_data *sf_prog_data = brw->sf.prog_data;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;

   brw_state_emit(brw, GENX(SF_STATE), 64, &brw->sf.state_offset, sf) {
      sf.KernelStartPointer = KSP(brw, brw->sf.prog_offset);
      sf.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      sf.GRFRegisterCount = DIV_ROUND_UP(sf_prog_data->total_grf, 16) - 1;
      sf.DispatchGRFStartRegisterForURBData = 3;
      sf.VertexURBEntryReadOffset = BRW_SF_URB_ENTRY_READ_OFFSET;
      sf.VertexURBEntryReadLength = sf_prog_data->urb_read_length;
      sf.NumberofURBEntries = brw->urb.nr_sf_entries;
      sf.URBEntryAllocationSize = brw->urb.sfsize - 1;

      /* STATE_PREFETCH command description describes this state as being
       * something loaded through the GPE (L2 ISC), so it's INSTRUCTION
       * domain.
       */
      sf.SetupViewportStateOffset =
         ro_bo(brw->batch.state.bo, brw->sf.vp_offset);

      sf.PointRasterizationRule = RASTRULE_UPPER_RIGHT;

      /* sf.ConstantURBEntryReadLength = stage_prog_data->curb_read_length; */
      /* sf.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2; */

      /* BRW_NEW_URB_FENCE */
      sf.MaximumNumberofThreads =
         MIN2(GEN_GEN == 5 ? 48 : 24, brw->urb.nr_sf_entries) - 1;

      sf.SpritePointEnable = ctx->Point.PointSprite;

      sf.DestinationOriginHorizontalBias = 0.5;
      sf.DestinationOriginVerticalBias = 0.5;
#else
   brw_batch_emit(brw, GENX(3DSTATE_SF), sf) {
      sf.StatisticsEnable = true;
#endif
      sf.ViewportTransformEnable = true;

#if GEN_GEN == 7
      /* _NEW_BUFFERS */
      sf.DepthBufferSurfaceFormat = brw_depthbuffer_format(brw);
#endif

#if GEN_GEN <= 7
      /* _NEW_POLYGON */
      sf.FrontWinding = brw->polygon_front_bit == render_to_fbo;
#if GEN_GEN >= 6
      sf.GlobalDepthOffsetEnableSolid = ctx->Polygon.OffsetFill;
      sf.GlobalDepthOffsetEnableWireframe = ctx->Polygon.OffsetLine;
      sf.GlobalDepthOffsetEnablePoint = ctx->Polygon.OffsetPoint;

      switch (ctx->Polygon.FrontMode) {
      case GL_FILL:
         sf.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (ctx->Polygon.BackMode) {
      case GL_FILL:
         sf.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      if (multisampled_fbo && ctx->Multisample.Enabled)
         sf.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;

      sf.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;
      sf.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      sf.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;
#endif
      sf.ScissorRectangleEnable = true;

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            sf.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            sf.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            sf.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         sf.CullMode = CULLMODE_NONE;
      }

#if GEN_IS_HASWELL
      sf.LineStippleEnable = ctx->Line.StippleFlag;
#endif
#endif

      /* _NEW_LINE */
#if GEN_GEN == 8
      const struct gen_device_info *devinfo = &brw->screen->devinfo;

      if (devinfo->is_cherryview)
         sf.CHVLineWidth = brw_get_line_width(brw);
      else
         sf.LineWidth = brw_get_line_width(brw);
#else
      sf.LineWidth = brw_get_line_width(brw);
#endif

      if (ctx->Line.SmoothFlag) {
         sf.LineEndCapAntialiasingRegionWidth = _10pixels;
#if GEN_GEN <= 7
         sf.AntiAliasingEnable = true;
#endif
      }

      /* _NEW_POINT - Clamp to ARB_point_parameters user limits */
      point_size = CLAMP(ctx->Point.Size, ctx->Point.MinSize, ctx->Point.MaxSize);
      /* Clamp to the hardware limits */
      sf.PointWidth = CLAMP(point_size, 0.125f, 255.875f);

      /* _NEW_PROGRAM | _NEW_POINT, BRW_NEW_VUE_MAP_GEOM_OUT */
      if (use_state_point_size(brw))
         sf.PointWidthSource = State;

#if GEN_GEN >= 8 || GEN_IS_G4X
      /* _NEW_POINT | _NEW_MULTISAMPLE */
      if ((ctx->Point.SmoothFlag || _mesa_is_multisample_enabled(ctx)) &&
          !ctx->Point.PointSprite)
         sf.SmoothPointEnable = true;
#endif

#if GEN_GEN == 10
      /* _NEW_BUFFERS
       * Smooth Point Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
       */
      const bool multisampled_fbo =
         _mesa_geometric_samples(ctx->DrawBuffer) > 1;
      if (multisampled_fbo)
         sf.SmoothPointEnable = false;
#endif

#if GEN_IS_G4X || GEN_GEN >= 5
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION) {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      } else {
         sf.TriangleFanProvokingVertexSelect = 1;
      }

#if GEN_GEN == 6
      /* BRW_NEW_FS_PROG_DATA */
      const struct brw_wm_prog_data *wm_prog_data =
         brw_wm_prog_data(brw->wm.base.prog_data);

      sf.AttributeSwizzleEnable = true;
      sf.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /*
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted, too.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) != render_to_fbo) {
         sf.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      } else {
         sf.PointSpriteTextureCoordinateOrigin = UPPERLEFT;
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | BRW_NEW_FRAGMENT_PROGRAM |
       * _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM | BRW_NEW_FS_PROG_DATA
       */
      uint32_t urb_entry_read_length;
      uint32_t urb_entry_read_offset;
      uint32_t point_sprite_enables;
      genX(calculate_attr_overrides)(brw, sf.Attribute, &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);
      sf.VertexURBEntryReadLength = urb_entry_read_length;
      sf.VertexURBEntryReadOffset = urb_entry_read_offset;
      sf.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sf.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
#endif
   }
}

static const struct brw_tracked_state genX(sf_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT |
               _NEW_LINE |
               _NEW_POINT |
               _NEW_PROGRAM |
               (GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0) |
               (GEN_GEN <= 7 ? _NEW_BUFFERS | _NEW_POLYGON : 0) |
               (GEN_GEN == 10 ? _NEW_BUFFERS : 0),
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               (GEN_GEN <= 5 ? BRW_NEW_BATCH |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_SF_PROG_DATA |
                               BRW_NEW_SF_VP |
                               BRW_NEW_URB_FENCE
                             : 0) |
               (GEN_GEN >= 6 ? BRW_NEW_CONTEXT : 0) |
               (GEN_GEN >= 6 && GEN_GEN <= 7 ?
                               BRW_NEW_GS_PROG_DATA |
                               BRW_NEW_PRIMITIVE |
                               BRW_NEW_TES_PROG_DATA
                             : 0) |
               (GEN_GEN == 6 ? BRW_NEW_FS_PROG_DATA |
                               BRW_NEW_FRAGMENT_PROGRAM
                             : 0),
   },
   .emit = genX(upload_sf),
};
/* ---------------------------------------------------------------------- */

static bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   unsigned i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
      uint64_t outputs_written = fp->info.outputs_written;

      /* _NEW_COLOR */
      if (rb && (outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
                 outputs_written & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
          GET_COLORMASK(ctx->Color.ColorMask, i)) {
         return true;
      }
   }

   return false;
}
static void
genX(upload_wm)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   UNUSED bool writes_depth =
      wm_prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;
   UNUSED struct brw_stage_state *stage_state = &brw->wm.base;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

#if GEN_GEN == 6
   /* We can't fold this into gen6_upload_wm_push_constants(), because
    * according to the SNB PRM, vol 2 part 1 section 7.2.2
    * (3DSTATE_CONSTANT_PS [DevSNB]):
    *
    *     "[DevSNB]: This packet must be followed by WM_STATE."
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_PS), wmcp) {
      if (wm_prog_data->base.nr_params != 0) {
         wmcp.Buffer0Valid = true;
         /* Pointer to the WM constant buffer.  Covered by the set of
          * state flags from gen6_upload_wm_push_constants.
          */
         wmcp.PointertoPSConstantBuffer0 = stage_state->push_const_offset;
         wmcp.PSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_WM), wm) {
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;

      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.BarycentricInterpolationMode = wm_prog_data->barycentric_interp_modes;
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(WM_STATE), 64, &stage_state->state_offset, wm) {
      if (wm_prog_data->dispatch_8 && wm_prog_data->dispatch_16) {
         /* These two fields should be the same pre-gen6, which is why we
          * only have one hardware field to program for both dispatch
          * widths.
          */
         assert(wm_prog_data->base.dispatch_grf_start_reg ==
                wm_prog_data->dispatch_grf_start_reg_2);
      }

      if (wm_prog_data->dispatch_8 || wm_prog_data->dispatch_16)
         wm.GRFRegisterCount0 = wm_prog_data->reg_blocks_0;

      if (stage_state->sampler_count)
         wm.SamplerStatePointer =
            ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#if GEN_GEN == 5
      if (wm_prog_data->prog_offset_2)
         wm.GRFRegisterCount2 = wm_prog_data->reg_blocks_2;
#endif

      wm.SetupURBEntryReadLength = wm_prog_data->num_varying_inputs * 2;
      wm.ConstantURBEntryReadLength = wm_prog_data->base.curb_read_length;
      /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
      wm.ConstantURBEntryReadOffset = brw->curbe.wm_start * 2;
      wm.EarlyDepthTestEnable = true;
      wm.LineAntialiasingRegionWidth = _05pixels;
      wm.LineEndCapAntialiasingRegionWidth = _10pixels;

      /* _NEW_POLYGON */
      if (ctx->Polygon.OffsetFill) {
         wm.GlobalDepthOffsetEnable = true;
         /* Something weird going on with legacy_global_depth_bias,
          * offset_constant, scaling and MRD.  This value passes glean
          * but gives some odd results elsewhere (eg. the
          * quad-offset-units test).
          */
         wm.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;

         /* This is the only value that passes glean:
          */
         wm.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      }

      wm.DepthCoefficientURBReadOffset = 1;
#endif
      /* BRW_NEW_STATS_WM */
      wm.StatisticsEnable = GEN_GEN >= 6 || brw->stats_wm;

#if GEN_GEN < 7
      if (wm_prog_data->base.use_alt_mode)
         wm.FloatingPointMode = FLOATING_POINT_MODE_Alternate;

      wm.SamplerCount = GEN_GEN == 5 ?
         0 : DIV_ROUND_UP(stage_state->sampler_count, 4);

      wm.BindingTableEntryCount =
         wm_prog_data->base.binding_table.size_bytes / 4;
      wm.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
      wm._8PixelDispatchEnable = wm_prog_data->dispatch_8;
      wm._16PixelDispatchEnable = wm_prog_data->dispatch_16;
      wm.DispatchGRFStartRegisterForConstantSetupData0 =
         wm_prog_data->base.dispatch_grf_start_reg;

      if (GEN_GEN == 6 ||
          wm_prog_data->dispatch_8 || wm_prog_data->dispatch_16) {
         wm.KernelStartPointer0 = KSP(brw, stage_state->prog_offset);
      }

#if GEN_GEN >= 5
      if (GEN_GEN == 6 || wm_prog_data->prog_offset_2) {
         wm.KernelStartPointer2 =
            KSP(brw, stage_state->prog_offset + wm_prog_data->prog_offset_2);
      }
#endif

#if GEN_GEN == 6
      wm.DualSourceBlendEnable =
         wm_prog_data->dual_src_blend && (ctx->Color.BlendEnabled & 1) &&
         ctx->Color.Blend[0]._UsesDualSrc;
      wm.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      wm.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* From the SNB PRM, volume 2 part 1, page 281:
       * "If the PS kernel does not need the Position XY Offsets
       * to compute a Position XY value, then this field should be
       * programmed to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       * to compute a Position XY value, this field should match Position
       * ZW Interpolation Mode to ensure a consistent position.xyzw
       * computation."
       *
       * We only require XY sample offsets.  So, this recommendation doesn't
       * look useful at the moment.  We might need this in future.
       */
      if (wm_prog_data->uses_pos_offset)
         wm.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         wm.PositionXYOffsetSelect = POSOFFSET_NONE;

      wm.DispatchGRFStartRegisterForConstantSetupData2 =
         wm_prog_data->dispatch_grf_start_reg_2;
#endif

      if (wm_prog_data->base.total_scratch) {
         wm.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         wm.PerThreadScratchSpace =
            ffs(stage_state->per_thread_scratch) - 11;
      }

      wm.PixelShaderComputedDepth = writes_depth;
#endif

      /* _NEW_LINE */
      wm.LineStippleEnable = ctx->Line.StippleFlag;

      /* _NEW_POLYGON */
      wm.PolygonStippleEnable = ctx->Polygon.StippleFlag;

#if GEN_GEN < 8

#if GEN_GEN >= 6
      wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;

      /* _NEW_BUFFERS */
      const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

      if (multisampled_fbo) {
         /* _NEW_MULTISAMPLE */
         if (ctx->Multisample.Enabled)
            wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
         else
            wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;

         if (wm_prog_data->persample_dispatch)
            wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
         else
            wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
      } else {
         wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
         wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
      }
#endif

      wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      if (wm_prog_data->uses_kill ||
          _mesa_is_alpha_test_enabled(ctx) ||
          _mesa_is_alpha_to_coverage_enabled(ctx) ||
          (GEN_GEN >= 6 && wm_prog_data->uses_omask)) {
         wm.PixelShaderKillsPixel = true;
      }

      /* _NEW_BUFFERS | _NEW_COLOR */
      if (brw_color_buffer_write_enabled(brw) || writes_depth ||
          wm.PixelShaderKillsPixel ||
          (GEN_GEN >= 6 && wm_prog_data->has_side_effects)) {
         wm.ThreadDispatchEnable = true;
      }

#if GEN_GEN >= 7
      wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif

      /* The "UAV access enable" bits are unnecessary on HSW because they only
       * seem to have an effect on the HW-assisted coherency mechanism which we
       * don't need, and the rasterization-related UAV_ONLY flag and the
       * DISPATCH_ENABLE bit can be set independently from it.
       * C.f. gen8_upload_ps_extra().
       *
       * BRW_NEW_FRAGMENT_PROGRAM | BRW_NEW_FS_PROG_DATA | _NEW_BUFFERS |
       * _NEW_COLOR
       */
#if GEN_IS_HASWELL
      if (!(brw_color_buffer_write_enabled(brw) || writes_depth) &&
          wm_prog_data->has_side_effects)
         wm.PSUAVonly = ON;
#endif
#endif /* GEN_GEN < 8 */

#if GEN_GEN >= 7
      /* BRW_NEW_FS_PROG_DATA */
      if (wm_prog_data->early_fragment_tests)
         wm.EarlyDepthStencilControl = EDSC_PREPS;
      else if (wm_prog_data->has_side_effects)
         wm.EarlyDepthStencilControl = EDSC_PSEXEC;
#endif
   }

#if GEN_GEN <= 5
   if (brw->wm.offset_clamp != ctx->Polygon.OffsetClamp) {
      brw_batch_emit(brw, GENX(3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP), clamp) {
         clamp.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;
      }

      brw->wm.offset_clamp = ctx->Polygon.OffsetClamp;
   }
#endif
}
static const struct brw_tracked_state genX(wm_state) = {
   .dirty = {
      .mesa  = _NEW_LINE |
               _NEW_POLYGON |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR
                            : 0) |
               (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0) |
               (GEN_GEN < 6 ? _NEW_POLYGONSTIPPLE : 0) |
               (GEN_GEN < 8 && GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0),
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA |
               (GEN_GEN < 6 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                              BRW_NEW_FRAGMENT_PROGRAM |
                              BRW_NEW_PROGRAM_CACHE |
                              BRW_NEW_SAMPLER_STATE_TABLE |
                              BRW_NEW_STATS_WM
                            : 0) |
               (GEN_GEN < 7 ? BRW_NEW_BATCH : BRW_NEW_CONTEXT),
   },
   .emit = genX(upload_wm),
};
/* ---------------------------------------------------------------------- */

/* We restrict scratch buffers to the bottom 32 bits of the address space
 * by using rw_32_bo().
 *
 * General State Base Address is a bit broken.  If the address + size as
 * seen by STATE_BASE_ADDRESS overflows 48 bits, the GPU appears to treat
 * all accesses to the buffer as being out of bounds and returns zero.
 */

#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
   pkt.KernelStartPointer = KSP(brw, stage_state->prog_offset);           \
   pkt.SamplerCount       =                                               \
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
   pkt.BindingTableEntryCount =                                           \
      stage_prog_data->binding_table.size_bytes / 4;                      \
   pkt.FloatingPointMode  = stage_prog_data->use_alt_mode;                \
                                                                          \
   if (stage_prog_data->total_scratch) {                                  \
      pkt.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0); \
      pkt.PerThreadScratchSpace =                                         \
         ffs(stage_state->per_thread_scratch) - 11;                       \
   }                                                                      \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      stage_prog_data->dispatch_grf_start_reg;                            \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;
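/* Editorial sketch (not part of the driver): the PerThreadScratchSpace
 * fields programmed above encode a power-of-two scratch allocation as
 * log2(bytes) - 10, which is ffs(bytes) - 11 when bytes is an exact power
 * of two.  A hypothetical helper showing the same arithmetic:
 */
UNUSED static uint32_t
example_encode_per_thread_scratch(uint32_t bytes)
{
   /* Assumes bytes is a power of two >= 1024, as per_thread_scratch is:
    * 1KB -> 0, 2KB -> 1, 4KB -> 2, ...
    */
   return ffs(bytes) - 11;
}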
static void
genX(upload_vs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(brw->vs.base.prog_data);
   const struct brw_stage_prog_data *stage_prog_data = &vue_prog_data->base;

   assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);
   assert(GEN_GEN < 11 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);

#if GEN_GEN == 6
   /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
    * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
    *
    *   [DevSNB] A pipeline flush must be programmed prior to a 3DSTATE_VS
    *   command that causes the VS Function Enable to toggle. Pipeline
    *   flush can be executed by sending a PIPE_CONTROL command with CS
    *   stall bit set and a post sync operation.
    *
    * We've already done such a flush at the start of state upload, so we
    * don't need to do another one here.
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), cvs) {
      if (stage_state->push_const_size != 0) {
         cvs.Buffer0Valid = true;
         cvs.PointertoVSConstantBuffer0 = stage_state->push_const_offset;
         cvs.VSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

   if (GEN_GEN == 7 && devinfo->is_ivybridge)
      gen7_emit_vs_workaround_flush(brw);

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_VS), vs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(VS_STATE), 32, &stage_state->state_offset, vs) {
#endif
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);

      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;

#if GEN_GEN < 6
      vs.GRFRegisterCount = DIV_ROUND_UP(vue_prog_data->total_grf, 16) - 1;
      vs.ConstantURBEntryReadLength = stage_prog_data->curb_read_length;
      vs.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2;

      vs.NumberofURBEntries = brw->urb.nr_vs_entries >> (GEN_GEN == 5 ? 2 : 0);
      vs.URBEntryAllocationSize = brw->urb.vsize - 1;

      vs.MaximumNumberofThreads =
         CLAMP(brw->urb.nr_vs_entries / 2, 1, devinfo->max_vs_threads) - 1;

      vs.StatisticsEnable = false;
      vs.SamplerStatePointer =
         ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#endif

#if GEN_GEN == 5
      /* Force single program flow on Ironlake.  We cannot reliably get
       * all applications working without it.  See:
       * https://bugs.freedesktop.org/show_bug.cgi?id=29172
       *
       * The most notable and reliably failing application is the Humus
       * demo "CelShading"
       */
      vs.SingleProgramFlow = true;
      vs.SamplerCount = 0; /* hardware requirement */
#endif

#if GEN_GEN >= 8
      vs.SIMD8DispatchEnable =
         vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8;

      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
#endif
   }

#if GEN_GEN == 6
   /* Based on my reading of the simulator, the VS constants don't get
    * pulled into the VS FF unit until an appropriate pipeline flush
    * happens, and instead the 3DSTATE_CONSTANT_VS packet just adds
    * references to them into a little FIFO.  The flushes are common,
    * but don't reliably happen between this and a 3DPRIMITIVE, causing
    * the primitive to use the wrong constants.  Then the FIFO
    * containing the constant setup gets added to again on the next
    * constants change, and eventually when a flush does happen the
    * unit is overwhelmed by constant changes and dies.
    *
    * To avoid this, send a PIPE_CONTROL down the line that will
    * update the unit immediately loading the constants.  The flush
    * type bits here were those set by the STATE_BASE_ADDRESS whose
    * move in a82a43e8d99e1715dd11c9c091b5ab734079b6a6 triggered the
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
#endif
}
static const struct brw_tracked_state genX(vs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? (_NEW_PROGRAM_CONSTANTS | _NEW_TRANSFORM) : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VS_PROG_DATA |
               (GEN_GEN == 6 ? BRW_NEW_VERTEX_PROGRAM : 0) |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_SAMPLER_STATE_TABLE |
                               BRW_NEW_URB_FENCE
                             : 0),
   },
   .emit = genX(upload_vs_state),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_cc_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   struct GENX(CC_VIEWPORT) ccv;
   uint32_t cc_vp_offset;
   uint32_t *cc_map =
      brw_state_batch(brw, 4 * GENX(CC_VIEWPORT_length) * viewport_count,
                      32, &cc_vp_offset);

   for (unsigned i = 0; i < viewport_count; i++) {
      /* _NEW_VIEWPORT | _NEW_TRANSFORM */
      const struct gl_viewport_attrib *vp = &ctx->ViewportArray[i];
      if (ctx->Transform.DepthClamp) {
         ccv.MinimumDepth = MIN2(vp->Near, vp->Far);
         ccv.MaximumDepth = MAX2(vp->Near, vp->Far);
      } else {
         ccv.MinimumDepth = 0.0;
         ccv.MaximumDepth = 1.0;
      }
      GENX(CC_VIEWPORT_pack)(NULL, cc_map, &ccv);
      cc_map += GENX(CC_VIEWPORT_length);
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
      ptr.CCViewportPointer = cc_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.CCViewportStateChange = 1;
      vp.PointertoCC_VIEWPORT = cc_vp_offset;
   }
#else
   brw->cc.vp_offset = cc_vp_offset;
   ctx->NewDriverState |= BRW_NEW_CC_VP;
#endif
}

const struct brw_tracked_state genX(cc_vp) = {
   .dirty = {
      .mesa = _NEW_TRANSFORM |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_cc_viewport)
};
/* ---------------------------------------------------------------------- */

static void
set_scissor_bits(const struct gl_context *ctx, int i,
                 bool render_to_fbo, unsigned fb_width, unsigned fb_height,
                 struct GENX(SCISSOR_RECT) *sc)
{
   int bbox[4];

   bbox[0] = MAX2(ctx->ViewportArray[i].X, 0);
   bbox[1] = MIN2(bbox[0] + ctx->ViewportArray[i].Width, fb_width);
   bbox[2] = MAX2(ctx->ViewportArray[i].Y, 0);
   bbox[3] = MIN2(bbox[2] + ctx->ViewportArray[i].Height, fb_height);
   _mesa_intersect_scissor_bounding_box(ctx, i, bbox);

   if (bbox[0] == bbox[1] || bbox[2] == bbox[3]) {
      /* If the scissor was out of bounds and got clamped to 0 width/height
       * at the bounds, the subtraction of 1 from maximums could produce a
       * negative number and thus not clip anything.  Instead, just provide
       * a min > max scissor inside the bounds, which produces the expected
       * no rendering.
       */
      sc->ScissorRectangleXMin = 1;
      sc->ScissorRectangleXMax = 0;
      sc->ScissorRectangleYMin = 1;
      sc->ScissorRectangleYMax = 0;
   } else if (render_to_fbo) {
      /* texmemory: Y=0=bottom */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = bbox[2];
      sc->ScissorRectangleYMax = bbox[3] - 1;
   } else {
      /* memory: Y=0=top */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = fb_height - bbox[3];
      sc->ScissorRectangleYMax = fb_height - bbox[2] - 1;
   }
}
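/* Worked example (editorial): with fb_height = 100 and a clamped bounding
 * box of bbox = {10, 30, 20, 60} (xmin, xmax, ymin, ymax in Mesa's
 * bottom-up, max-exclusive convention), both paths program XMax = 30 - 1 =
 * 29, and the winsys path programs YMin = 100 - 60 = 40 and
 * YMax = 100 - 20 - 1 = 79, converting to the hardware's top-down,
 * inclusive coordinates.
 */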
static void
genX(upload_scissor_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   struct GENX(SCISSOR_RECT) scissor;
   uint32_t scissor_state_offset;
   const unsigned int fb_width = _mesa_geometric_width(ctx->DrawBuffer);
   const unsigned int fb_height = _mesa_geometric_height(ctx->DrawBuffer);
   uint32_t *scissor_map;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   scissor_map = brw_state_batch(
      brw, GENX(SCISSOR_RECT_length) * sizeof(uint32_t) * viewport_count,
      32, &scissor_state_offset);

   /* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */

   /* The scissor only needs to handle the intersection of drawable and
    * scissor rect.  Clipping to the boundaries of static shared buffers
    * for front/back/depth is covered by looping over cliprects in brw_draw.c.
    *
    * Note that the hardware's coordinates are inclusive, while Mesa's min is
    * inclusive but max is exclusive.
    */
   for (unsigned i = 0; i < viewport_count; i++) {
      set_scissor_bits(ctx, i, render_to_fbo, fb_width, fb_height, &scissor);
      GENX(SCISSOR_RECT_pack)(
         NULL, scissor_map + i * GENX(SCISSOR_RECT_length), &scissor);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
      ptr.ScissorRectPointer = scissor_state_offset;
   }
}

static const struct brw_tracked_state genX(scissor_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_SCISSOR |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_scissor_state),
};
/* ---------------------------------------------------------------------- */

static void
brw_calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
                             float m00, float m11, float m30, float m31,
                             float *xmin, float *xmax,
                             float *ymin, float *ymax)
{
   /* According to the "Vertex X,Y Clamping and Quantization" section of the
    * Strips and Fans documentation:
    *
    * "The vertex X and Y screen-space coordinates are also /clamped/ to the
    *  fixed-point "guardband" range supported by the rasterization hardware"
    *
    * and
    *
    * "In almost all circumstances, if an object’s vertices are actually
    *  modified by this clamping (i.e., had X or Y coordinates outside of
    *  the guardband extent the rendered object will not match the intended
    *  result.  Therefore software should take steps to ensure that this does
    *  not happen - e.g., by clipping objects such that they do not exceed
    *  these limits after the Drawing Rectangle is applied."
    *
    * I believe the fundamental restriction is that the rasterizer (in
    * the SF/WM stages) have a limit on the number of pixels that can be
    * rasterized.  We need to ensure any coordinates beyond the rasterizer
    * limit are handled by the clipper.  So effectively that limit becomes
    * the clipper's guardband size.
    *
    * It goes on to say:
    *
    * "In addition, in order to be correctly rendered, objects must have a
    *  screenspace bounding box not exceeding 8K in the X or Y direction.
    *  This additional restriction must also be comprehended by software,
    *  i.e., enforced by use of clipping."
    *
    * This makes no sense.  Gen7+ hardware supports 16K render targets,
    * and you definitely need to be able to draw polygons that fill the
    * surface.  Our assumption is that the rasterizer was limited to 8K
    * on Sandybridge, which only supports 8K surfaces, and it was actually
    * increased to 16K on Ivybridge and later.
    *
    * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
    */
   const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;

   if (m00 != 0 && m11 != 0) {
      /* First, we compute the screen-space render area */
      const float ss_ra_xmin = MIN3(        0, m30 + m00, m30 - m00);
      const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
      const float ss_ra_ymin = MIN3(        0, m31 + m11, m31 - m11);
      const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);

      /* We want the guardband to be centered on that */
      const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
      const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
      const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
      const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;

      /* Now we need it in native device coordinates */
      const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
      const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
      const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
      const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;

      /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
       * flipped upside-down.  X should be fine though.
       */
      assert(ndc_gb_xmin <= ndc_gb_xmax);
      *xmin = ndc_gb_xmin;
      *xmax = ndc_gb_xmax;
      *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
      *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
   } else {
      /* The viewport scales to 0, so nothing will be rendered. */
      *xmin = 0.0f;
      *xmax = 0.0f;
      *ymin = 0.0f;
      *ymax = 0.0f;
   }
}
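/* Worked example (editorial): for a 1920x1080 framebuffer with a
 * full-screen viewport, _mesa_get_viewport_xform() yields m00 = 960 and
 * m30 = 960, so the screen-space render area spans [0, 1920] and is
 * centered at 960.  With the 16K guardband on Gen7+,
 * ss_gb_xmin = 960 - 16384 = -15424, and converting back to NDC gives
 * ndc_gb_xmin = (-15424 - 960) / 960 = -17.07 (and +17.07 for xmax):
 * geometry within roughly 17 viewport half-widths of center is
 * rasterizer-clamped rather than clipped.
 */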
static void
genX(upload_sf_clip_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float y_scale, y_bias;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   /* _NEW_BUFFERS */
   const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   const uint32_t fb_width = (float)_mesa_geometric_width(ctx->DrawBuffer);
   const uint32_t fb_height = (float)_mesa_geometric_height(ctx->DrawBuffer);

#if GEN_GEN >= 7
#define clv sfv
   struct GENX(SF_CLIP_VIEWPORT) sfv;
   uint32_t sf_clip_vp_offset;
   uint32_t *sf_clip_map =
      brw_state_batch(brw, GENX(SF_CLIP_VIEWPORT_length) * 4 * viewport_count,
                      64, &sf_clip_vp_offset);
#else
   struct GENX(SF_VIEWPORT) sfv;
   struct GENX(CLIP_VIEWPORT) clv;
   uint32_t sf_vp_offset, clip_vp_offset;
   uint32_t *sf_map =
      brw_state_batch(brw, GENX(SF_VIEWPORT_length) * 4 * viewport_count,
                      32, &sf_vp_offset);
   uint32_t *clip_map =
      brw_state_batch(brw, GENX(CLIP_VIEWPORT_length) * 4 * viewport_count,
                      32, &clip_vp_offset);
#endif

   /* _NEW_BUFFERS */
   if (render_to_fbo) {
      y_scale = 1.0;
      y_bias = 0;
   } else {
      y_scale = -1.0;
      y_bias = (float)fb_height;
   }

   for (unsigned i = 0; i < brw->clip.viewport_count; i++) {
      /* _NEW_VIEWPORT: Guardband Clipping */
      float scale[3], translate[3], gb_xmin, gb_xmax, gb_ymin, gb_ymax;
      _mesa_get_viewport_xform(ctx, i, scale, translate);

      sfv.ViewportMatrixElementm00 = scale[0];
      sfv.ViewportMatrixElementm11 = scale[1] * y_scale,
      sfv.ViewportMatrixElementm22 = scale[2],
      sfv.ViewportMatrixElementm30 = translate[0],
      sfv.ViewportMatrixElementm31 = translate[1] * y_scale + y_bias,
      sfv.ViewportMatrixElementm32 = translate[2],
      brw_calculate_guardband_size(fb_width, fb_height,
                                   sfv.ViewportMatrixElementm00,
                                   sfv.ViewportMatrixElementm11,
                                   sfv.ViewportMatrixElementm30,
                                   sfv.ViewportMatrixElementm31,
                                   &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

      clv.XMinClipGuardband = gb_xmin;
      clv.XMaxClipGuardband = gb_xmax;
      clv.YMinClipGuardband = gb_ymin;
      clv.YMaxClipGuardband = gb_ymax;

#if GEN_GEN < 6
      set_scissor_bits(ctx, i, render_to_fbo, fb_width, fb_height,
                       &sfv.ScissorRectangle);
#elif GEN_GEN >= 8
      /* _NEW_VIEWPORT | _NEW_BUFFERS: Screen Space Viewport
       * The hardware will take the intersection of the drawing rectangle,
       * scissor rectangle, and the viewport extents.  However, emitting
       * 3DSTATE_DRAWING_RECTANGLE is expensive since it requires a full
       * pipeline stall so we're better off just being a little more clever
       * with our viewport so we can emit it once at context creation time.
       */
      const float viewport_Xmin = MAX2(ctx->ViewportArray[i].X, 0);
      const float viewport_Ymin = MAX2(ctx->ViewportArray[i].Y, 0);
      const float viewport_Xmax =
         MIN2(ctx->ViewportArray[i].X + ctx->ViewportArray[i].Width, fb_width);
      const float viewport_Ymax =
         MIN2(ctx->ViewportArray[i].Y + ctx->ViewportArray[i].Height, fb_height);

      if (render_to_fbo) {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = viewport_Ymin;
         sfv.YMaxViewPort = viewport_Ymax - 1;
      } else {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = fb_height - viewport_Ymax;
         sfv.YMaxViewPort = fb_height - viewport_Ymin - 1;
      }
#endif

#if GEN_GEN >= 7
      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_map, &sfv);
      sf_clip_map += GENX(SF_CLIP_VIEWPORT_length);
#else
      GENX(SF_VIEWPORT_pack)(NULL, sf_map, &sfv);
      GENX(CLIP_VIEWPORT_pack)(NULL, clip_map, &clv);
      sf_map += GENX(SF_VIEWPORT_length);
      clip_map += GENX(CLIP_VIEWPORT_length);
#endif
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
      ptr.SFClipViewportPointer = sf_clip_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.SFViewportStateChange = 1;
      vp.CLIPViewportStateChange = 1;
      vp.PointertoCLIP_VIEWPORT = clip_vp_offset;
      vp.PointertoSF_VIEWPORT = sf_vp_offset;
   }
#else
   brw->sf.vp_offset = sf_vp_offset;
   brw->clip.vp_offset = clip_vp_offset;
   brw->ctx.NewDriverState |= BRW_NEW_SF_VP | BRW_NEW_CLIP_VP;
#endif
}

static const struct brw_tracked_state genX(sf_clip_viewport) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_VIEWPORT |
              (GEN_GEN <= 5 ? _NEW_SCISSOR : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_sf_clip_viewport),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_gs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->gs.base;
   const struct gl_program *gs_prog = brw->programs[MESA_SHADER_GEOMETRY];
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool active = GEN_GEN >= 6 && gs_prog;

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   UNUSED const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);
#if GEN_GEN >= 7
   const struct brw_gs_prog_data *gs_prog_data =
      brw_gs_prog_data(stage_prog_data);
#endif

#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_GS), cgs) {
      if (active && stage_state->push_const_size != 0) {
         cgs.Buffer0Valid = true;
         cgs.PointertoGSConstantBuffer0 = stage_state->push_const_offset;
         cgs.GSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    *     "Note: Because of corruption in IVB:GT2, software needs to flush the
    *     whole fixed function pipeline when the GS enable changes value in
    *     the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (devinfo->gt == 2 && brw->gs.enabled != active)
      gen7_emit_cs_stall_flush(brw);
#endif

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_GS), gs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(GS_STATE), 32, &brw->ff_gs.state_offset, gs) {
#endif

#if GEN_GEN >= 6
      if (active) {
         INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);

#if GEN_GEN >= 7
         gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
         gs.OutputTopology = gs_prog_data->output_topology;
         gs.ControlDataHeaderSize =
            gs_prog_data->control_data_header_size_hwords;

         gs.InstanceControl = gs_prog_data->invocations - 1;
         gs.DispatchMode = vue_prog_data->dispatch_mode;

         gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;

         gs.ControlDataFormat = gs_prog_data->control_data_format;
#endif

         /* Note: the meaning of the GEN7_GS_REORDER_TRAILING bit changes between
          * Ivy Bridge and Haswell.
          *
          * On Ivy Bridge, setting this bit causes the vertices of a triangle
          * strip to be delivered to the geometry shader in an order that does
          * not strictly follow the OpenGL spec, but preserves triangle
          * orientation.  For example, if the vertices are (1, 2, 3, 4, 5), then
          * the geometry shader sees triangles:
          *
          * (1, 2, 3), (2, 4, 3), (3, 4, 5)
          *
          * (Clearing the bit is even worse, because it fails to preserve
          * orientation).
          *
          * Triangle strips with adjacency always ordered in a way that preserves
          * triangle orientation but does not strictly follow the OpenGL spec,
          * regardless of the setting of this bit.
          *
          * On Haswell, both triangle strips and triangle strips with adjacency
          * are always ordered in a way that preserves triangle orientation.
          * Setting this bit causes the ordering to strictly follow the OpenGL
          * spec.
          *
          * So in either case we want to set the bit.  Unfortunately on Ivy
          * Bridge this will get the order close to correct but not perfect.
          */
         gs.ReorderMode = TRAILING;
         gs.MaximumNumberofThreads =
            GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                         : (devinfo->max_gs_threads - 1);

#if GEN_GEN < 7
         gs.SOStatisticsEnable = true;
         if (gs_prog->info.has_transform_feedback_varyings)
            gs.SVBIPayloadEnable = true;

         /* GEN6_GS_SPF_MODE and GEN6_GS_VECTOR_MASK_ENABLE are enabled as it
          * was previously done for gen6.
          *
          * TODO: test with both disabled to see if the HW is behaving
          * as expected, like in gen7.
          */
         gs.SingleProgramFlow = true;
         gs.VectorMaskEnable = true;
#endif

#if GEN_GEN >= 8
         gs.ExpectedVertexCount = gs_prog_data->vertices_in;

         if (gs_prog_data->static_vertex_count != -1) {
            gs.StaticOutput = true;
            gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
         }
         gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

         gs.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;

         const int urb_entry_write_offset = 1;
         const uint32_t urb_entry_output_length =
            DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
            urb_entry_write_offset;

         gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
         gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
#endif
      }
#endif

#if GEN_GEN <= 6
      if (!active && brw->ff_gs.prog_active) {
         /* In gen6, transform feedback for the VS stage is done with an
          * ad-hoc GS program. This function provides the needed 3DSTATE_GS
          * for this.
          */
         gs.KernelStartPointer = KSP(brw, brw->ff_gs.prog_offset);
         gs.SingleProgramFlow = true;
         gs.DispatchGRFStartRegisterForURBData = GEN_GEN == 6 ? 2 : 1;
         gs.VertexURBEntryReadLength = brw->ff_gs.prog_data->urb_read_length;

#if GEN_GEN <= 5
         gs.GRFRegisterCount =
            DIV_ROUND_UP(brw->ff_gs.prog_data->total_grf, 16) - 1;
         /* BRW_NEW_URB_FENCE */
         gs.NumberofURBEntries = brw->urb.nr_gs_entries;
         gs.URBEntryAllocationSize = brw->urb.vsize - 1;
         gs.MaximumNumberofThreads = brw->urb.nr_gs_entries >= 8 ? 1 : 0;
         gs.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
#else
         gs.VectorMaskEnable = true;
         gs.SVBIPayloadEnable = true;
         gs.SVBIPostIncrementEnable = true;
         gs.SVBIPostIncrementValue =
            brw->ff_gs.prog_data->svbi_postincrement_value;
         gs.SOStatisticsEnable = true;
         gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
#endif
      }
#endif
      if (!active && !brw->ff_gs.prog_active) {
#if GEN_GEN < 8
         gs.DispatchGRFStartRegisterForURBData = 1;
#if GEN_GEN >= 7
         gs.IncludeVertexHandles = true;
#endif
#endif
      }

#if GEN_GEN >= 6
      gs.StatisticsEnable = true;
#endif
#if GEN_GEN == 5 || GEN_GEN == 6
      gs.RenderingEnabled = true;
#endif
#if GEN_GEN < 8
      gs.MaximumVPIndex = brw->clip.viewport_count - 1;
#endif
   }

   brw->gs.enabled = active;
}

static const struct brw_tracked_state genX(gs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_URB_FENCE |
                               BRW_NEW_VIEWPORT_COUNT
                             : 0) |
               (GEN_GEN >= 6 ? BRW_NEW_CONTEXT |
                               BRW_NEW_GEOMETRY_PROGRAM |
                               BRW_NEW_GS_PROG_DATA
                             : 0) |
               (GEN_GEN < 7 ? BRW_NEW_FF_GS_PROG_DATA : 0),
   },
   .emit = genX(upload_gs_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static GLenum
fix_dual_blend_alpha_to_one(GLenum function)
{
   switch (function) {
   case GL_SRC1_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_SRC1_ALPHA:
      return GL_ZERO;
   }

   return function;
}

#define blend_factor(x) brw_translate_blend_factor(x)
#define blend_eqn(x) brw_translate_blend_equation(x)

/**
 * Modify blend function to force destination alpha to 1.0
 *
 * If \c function specifies a blend function that uses destination alpha,
 * replace it with a function that hard-wires destination alpha to 1.0.  This
 * is used when rendering to xRGB targets.
 */
static GLenum
brw_fix_xRGB_alpha(GLenum function)
{
   switch (function) {
   case GL_DST_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_DST_ALPHA:
   case GL_SRC_ALPHA_SATURATE:
      return GL_ZERO;
   }

   return function;
}

#if GEN_GEN >= 6
typedef struct GENX(BLEND_STATE_ENTRY) BLEND_ENTRY_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) BLEND_ENTRY_GENXML;
#endif
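/* Editorial example: blending with dstRGB = GL_DST_ALPHA against an xRGB
 * (alpha-less) renderbuffer would read whatever stale data the hardware
 * keeps in the ignored alpha channel; brw_fix_xRGB_alpha() rewrites it to
 * GL_ONE, the implicit alpha = 1.0 the format requires, and likewise maps
 * GL_ONE_MINUS_DST_ALPHA (= 1 - 1 = 0) to GL_ZERO.
 */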
static bool
set_blend_entry_bits(struct brw_context *brw, BLEND_ENTRY_GENXML *entry, int i,
                     bool alpha_to_one)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

   bool independent_alpha_blend = false;

   /* Used for implementing the following bit of GL_EXT_texture_integer:
    * "Per-fragment operations that require floating-point color
    *  components, including multisample alpha operations, alpha test,
    *  blending, and dithering, have no effect when the corresponding
    *  colors are written to an integer color buffer."
    */
   const bool integer = ctx->DrawBuffer->_IntegerBuffers & (0x1 << i);

   const unsigned blend_enabled = GEN_GEN >= 6 ?
      ctx->Color.BlendEnabled & (1 << i) : ctx->Color.BlendEnabled;

   /* _NEW_COLOR */
   if (ctx->Color.ColorLogicOpEnabled) {
      GLenum rb_type = rb ? _mesa_get_format_datatype(rb->Format)
                          : GL_UNSIGNED_NORMALIZED;
      WARN_ONCE(ctx->Color.LogicOp != GL_COPY &&
                rb_type != GL_UNSIGNED_NORMALIZED &&
                rb_type != GL_FLOAT, "Ignoring %s logic op on %s "
                "renderbuffer\n",
                _mesa_enum_to_string(ctx->Color.LogicOp),
                _mesa_enum_to_string(rb_type));
      if (GEN_GEN >= 8 || rb_type == GL_UNSIGNED_NORMALIZED) {
         entry->LogicOpEnable = true;
         entry->LogicOpFunction = ctx->Color._LogicOp;
      }
   } else if (blend_enabled && !ctx->Color._AdvancedBlendMode
              && (GEN_GEN <= 5 || !integer)) {
      GLenum eqRGB = ctx->Color.Blend[i].EquationRGB;
      GLenum eqA = ctx->Color.Blend[i].EquationA;
      GLenum srcRGB = ctx->Color.Blend[i].SrcRGB;
      GLenum dstRGB = ctx->Color.Blend[i].DstRGB;
      GLenum srcA = ctx->Color.Blend[i].SrcA;
      GLenum dstA = ctx->Color.Blend[i].DstA;

      if (eqRGB == GL_MIN || eqRGB == GL_MAX)
         srcRGB = dstRGB = GL_ONE;

      if (eqA == GL_MIN || eqA == GL_MAX)
         srcA = dstA = GL_ONE;

      /* Due to hardware limitations, the destination may have information
       * in an alpha channel even when the format specifies no alpha
       * channel.  In order to avoid getting any incorrect blending due to
       * that alpha channel, coerce the blend factors to values that will
       * not read the alpha channel, but will instead use the correct
       * implicit value for alpha.
       */
      if (rb && !_mesa_base_format_has_channel(rb->_BaseFormat,
                                               GL_TEXTURE_ALPHA_TYPE)) {
         srcRGB = brw_fix_xRGB_alpha(srcRGB);
         srcA = brw_fix_xRGB_alpha(srcA);
         dstRGB = brw_fix_xRGB_alpha(dstRGB);
         dstA = brw_fix_xRGB_alpha(dstA);
      }

      /* From the BLEND_STATE docs, DWord 0, Bit 29 (AlphaToOne Enable):
       * "If Dual Source Blending is enabled, this bit must be disabled."
       *
       * We override SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO,
       * and leave it enabled anyway.
       */
      if (GEN_GEN >= 6 && ctx->Color.Blend[i]._UsesDualSrc && alpha_to_one) {
         srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
         srcA = fix_dual_blend_alpha_to_one(srcA);
         dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
         dstA = fix_dual_blend_alpha_to_one(dstA);
      }

      entry->ColorBufferBlendEnable = true;
      entry->DestinationBlendFactor = blend_factor(dstRGB);
      entry->SourceBlendFactor = blend_factor(srcRGB);
      entry->DestinationAlphaBlendFactor = blend_factor(dstA);
      entry->SourceAlphaBlendFactor = blend_factor(srcA);
      entry->ColorBlendFunction = blend_eqn(eqRGB);
      entry->AlphaBlendFunction = blend_eqn(eqA);

      if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB)
         independent_alpha_blend = true;
   }

   return independent_alpha_blend;
}
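/* Editorial note: for GL_MIN/GL_MAX the GL spec defines the blend equation
 * to ignore the source and destination factors entirely; forcing them to
 * GL_ONE above keeps the hardware's factor inputs harmless in case it
 * applies them anyway.
 */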
static void
genX(upload_blend_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   int size;

   /* We need at least one BLEND_STATE written, because we might do
    * thread dispatch even if _NumColorDrawBuffers is 0 (for example
    * for computed depth or alpha test), which will do an FB write
    * with render target 0, which will reference BLEND_STATE[0] for
    * alpha test enable.
    */
   int nr_draw_buffers = ctx->DrawBuffer->_NumColorDrawBuffers;
   if (nr_draw_buffers == 0 && ctx->Color.AlphaEnabled)
      nr_draw_buffers = 1;

   size = GENX(BLEND_STATE_ENTRY_length) * 4 * nr_draw_buffers;
#if GEN_GEN >= 8
   size += GENX(BLEND_STATE_length) * 4;
#endif

   uint32_t *blend_map;
   blend_map = brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);

#if GEN_GEN >= 8
   struct GENX(BLEND_STATE) blend = { 0 };
   {
#else
   for (int i = 0; i < nr_draw_buffers; i++) {
      struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#define blend entry
#endif
      /* OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       * integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       * operations are skipped."
       */
      if (!(ctx->DrawBuffer->_IntegerBuffers & 0x1)) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            if (ctx->Multisample.SampleAlphaToCoverage) {
               blend.AlphaToCoverageEnable = true;
               blend.AlphaToCoverageDitherEnable = GEN_GEN >= 7;
            }
            if (ctx->Multisample.SampleAlphaToOne)
               blend.AlphaToOneEnable = true;
         }

         /* _NEW_COLOR */
         if (ctx->Color.AlphaEnabled) {
            blend.AlphaTestEnable = true;
            blend.AlphaTestFunction =
               intel_translate_compare_func(ctx->Color.AlphaFunc);
         }

         if (ctx->Color.DitherFlag) {
            blend.ColorDitherEnable = true;
         }
      }

#if GEN_GEN >= 8
      for (int i = 0; i < nr_draw_buffers; i++) {
         struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#endif
         blend.IndependentAlphaBlendEnable =
            set_blend_entry_bits(brw, &entry, i, blend.AlphaToOneEnable) ||
            blend.IndependentAlphaBlendEnable;

         /* See section 8.1.6 "Pre-Blend Color Clamping" of the
          * SandyBridge PRM Volume 2 Part 1 for HW requirements.
          *
          * We do our ARB_color_buffer_float CLAMP_FRAGMENT_COLOR
          * clamping in the fragment shader.  For its clamping of
          * blending, the spec says:
          *
          * "RESOLVED: For fixed-point color buffers, the inputs and
          *  the result of the blending equation are clamped.  For
          *  floating-point color buffers, no clamping occurs."
          *
          * So, generally, we want clamping to the render target's range.
          * And, good news, the hardware tables for both pre- and
          * post-blend color clamping are either ignored, or any are
          * allowed, or clamping is required but RT range clamping is a
          * valid option.
          */
         entry.PreBlendColorClampEnable = true;
         entry.PostBlendColorClampEnable = true;
         entry.ColorClampRange = COLORCLAMP_RTFORMAT;

         entry.WriteDisableRed   = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 0);
         entry.WriteDisableGreen = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 1);
         entry.WriteDisableBlue  = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 2);
         entry.WriteDisableAlpha = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 3);

#if GEN_GEN >= 8
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[1 + i * 2], &entry);
      }
#else
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[i * 2], &entry);
#undef blend
#endif
   }

#if GEN_GEN >= 8
   GENX(BLEND_STATE_pack)(NULL, blend_map, &blend);
#endif

#if GEN_GEN < 7
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoBLEND_STATE = brw->cc.blend_state_offset;
      ptr.BLEND_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
      ptr.BlendStatePointer = brw->cc.blend_state_offset;
#if GEN_GEN >= 8
      ptr.BlendStatePointerValid = true;
#endif
   }
#endif
}

static const struct brw_tracked_state genX(blend_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_STATE_BASE_ADDRESS,
   },
   .emit = genX(upload_blend_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};

static void
genX(upload_push_constant_packets)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   UNUSED uint32_t mocs = GEN_GEN < 8 ? GEN7_MOCS_L3 : 0;

   struct brw_stage_state *stage_states[] = {
      &brw->vs.base,
      &brw->tcs.base,
      &brw->tes.base,
      &brw->gs.base,
      &brw->wm.base,
   };

   if (GEN_GEN == 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail &&
       stage_states[MESA_SHADER_VERTEX]->push_constants_dirty)
      gen7_emit_vs_workaround_flush(brw);

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct brw_stage_state *stage_state = stage_states[stage];
      UNUSED struct gl_program *prog = ctx->_Shader->CurrentProgram[stage];

      if (!stage_state->push_constants_dirty)
         continue;

      brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), pkt) {
         pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
         if (stage_state->prog_data) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
            /* The Skylake PRM contains the following restriction:
             *
             *    "The driver must ensure The following case does not occur
             *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
             *     buffer 3 read length equal to zero committed followed by a
             *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
             *     zero committed."
             *
             * To avoid this, we program the buffers in the highest slots.
             * This way, slot 0 is only used if slot 3 is also used.
             */
            int n = 3;

            for (int i = 3; i >= 0; i--) {
               const struct brw_ubo_range *range =
                  &stage_state->prog_data->ubo_ranges[i];

               if (range->length == 0)
                  continue;

               const struct gl_uniform_block *block =
                  prog->sh.UniformBlocks[range->block];
               const struct gl_buffer_binding *binding =
                  &ctx->UniformBufferBindings[block->Binding];

               if (binding->BufferObject == ctx->Shared->NullBufferObj) {
                  static unsigned msg_id = 0;
                  _mesa_gl_debug(ctx, &msg_id, MESA_DEBUG_SOURCE_API,
                                 MESA_DEBUG_TYPE_UNDEFINED,
                                 MESA_DEBUG_SEVERITY_HIGH,
                                 "UBO %d unbound, %s shader uniform data "
                                 "will be undefined.",
                                 range->block,
                                 _mesa_shader_stage_to_string(stage));
                  continue;
               }

               assert(binding->Offset % 32 == 0);

               struct brw_bo *bo = intel_bufferobj_buffer(brw,
                  intel_buffer_object(binding->BufferObject),
                  binding->Offset, range->length * 32, false);

               pkt.ConstantBody.ReadLength[n] = range->length;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(bo, range->start * 32 + binding->Offset);
               n--;
            }

            if (stage_state->push_const_size > 0) {
               assert(n >= 0);
               pkt.ConstantBody.ReadLength[n] = stage_state->push_const_size;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(stage_state->push_const_bo,
                        stage_state->push_const_offset);
            }
#else
            pkt.ConstantBody.ReadLength[0] = stage_state->push_const_size;
            pkt.ConstantBody.Buffer[0].offset =
               stage_state->push_const_offset | mocs;
#endif
         }
      }

      stage_state->push_constants_dirty = false;
      brw->ctx.NewDriverState |= GEN_GEN >= 9 ? BRW_NEW_SURFACES : 0;
   }
}
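/* Worked example (editorial): if only ubo_ranges[0] and ubo_ranges[1] have
 * nonzero lengths, the descending loop above lands them in ConstantBody
 * slots 3 and 2, and any push constants then take the next free slot down.
 * Slot 0 is therefore only ever written when slot 3 already is, which is
 * precisely the ordering the Skylake PRM restriction asks for.
 */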
const struct brw_tracked_state genX(push_constant_packets) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_DRAW_CALL,
   },
   .emit = genX(upload_push_constant_packets),
};

static void
genX(upload_vs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_program *vp = brw->programs[MESA_SHADER_VERTEX];
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;

   gen6_upload_push_constants(brw, vp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(vs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VERTEX_PROGRAM |
               BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(upload_vs_push_constants),
};

static void
genX(upload_gs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->gs.base;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   const struct gl_program *gp = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->gs.base.prog_data;

   gen6_upload_push_constants(brw, gp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(gs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_GS_PROG_DATA,
   },
   .emit = genX(upload_gs_push_constants),
};

static void
genX(upload_wm_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   gen6_upload_push_constants(brw, fp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(wm_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_wm_push_constants),
};
/* ---------------------------------------------------------------------- */

static unsigned
genX(determine_sample_mask)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float coverage = 1.0f;
   float coverage_invert = false;
   unsigned sample_mask = ~0u;

   /* BRW_NEW_NUM_SAMPLES */
   unsigned num_samples = brw->num_samples;

   /* _NEW_MULTISAMPLE */
   if (_mesa_is_multisample_enabled(ctx)) {
      if (ctx->Multisample.SampleCoverage) {
         coverage = ctx->Multisample.SampleCoverageValue;
         coverage_invert = ctx->Multisample.SampleCoverageInvert;
      }
      if (ctx->Multisample.SampleMask) {
         sample_mask = ctx->Multisample.SampleMaskValue;
      }
   }

   if (num_samples > 1) {
      int coverage_int = (int) (num_samples * coverage + 0.5f);
      uint32_t coverage_bits = (1 << coverage_int) - 1;
      if (coverage_invert)
         coverage_bits ^= (1 << num_samples) - 1;
      return coverage_bits & sample_mask;
   } else {
      return 1;
   }
}
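/* Worked example (editorial): with num_samples = 4, SampleCoverageValue =
 * 0.5 and invert disabled, coverage_int = 2 and coverage_bits = 0b0011;
 * with SampleCoverageInvert set, the mask flips to 0b1100.  Either way the
 * result is then ANDed with the application's glSampleMaski() value.
 */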
static void
genX(emit_3dstate_multisample2)(struct brw_context *brw,
                                unsigned num_samples)
{
   unsigned log2_samples = ffs(num_samples) - 1;

   brw_batch_emit(brw, GENX(3DSTATE_MULTISAMPLE), multi) {
      multi.PixelLocation = CENTER;
      multi.NumberofMultisamples = log2_samples;
#if GEN_GEN == 6
      GEN_SAMPLE_POS_4X(multi.Sample);
#elif GEN_GEN == 7
      switch (num_samples) {
      case 1:
         GEN_SAMPLE_POS_1X(multi.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(multi.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(multi.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(multi.Sample);
         break;
      default:
         break;
      }
#endif
   }
}
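/* Editorial note: NumberofMultisamples above is the log2 of the sample
 * count (ffs(n) - 1 for the power-of-two counts used here): 1x -> 0,
 * 2x -> 1, 4x -> 2, 8x -> 3.
 */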
static void
genX(upload_multisample_state)(struct brw_context *brw)
{
   assert(brw->num_samples > 0 && brw->num_samples <= 16);

   genX(emit_3dstate_multisample2)(brw, brw->num_samples);

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = genX(determine_sample_mask)(brw);
   }
}

static const struct brw_tracked_state genX(multisample_state) = {
   .dirty = {
      .mesa = _NEW_MULTISAMPLE |
              (GEN_GEN == 10 ? _NEW_BUFFERS : 0),
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_NUM_SAMPLES,
   },
   .emit = genX(upload_multisample_state)
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_color_calc_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_state_emit(brw, GENX(COLOR_CALC_STATE), 64, &brw->cc.state_offset, cc) {
#if GEN_GEN <= 5
      cc.IndependentAlphaBlendEnable =
         set_blend_entry_bits(brw, &cc, 0, false);
      set_depth_stencil_bits(brw, &cc);

      if (ctx->Color.AlphaEnabled &&
          ctx->DrawBuffer->_NumColorDrawBuffers <= 1) {
         cc.AlphaTestEnable = true;
         cc.AlphaTestFunction =
            intel_translate_compare_func(ctx->Color.AlphaFunc);
      }

      cc.ColorDitherEnable = ctx->Color.DitherFlag;

      /* BRW_NEW_STATS_WM */
      cc.StatisticsEnable = brw->stats_wm;

      cc.CCViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->cc.vp_offset);
#else
      /* _NEW_COLOR */
      cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];

      /* _NEW_STENCIL */
      cc.StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      cc.BackfaceStencilReferenceValue =
         _mesa_get_stencil_ref(ctx, ctx->Stencil._BackFace);

      /* _NEW_COLOR */
      UNCLAMPED_FLOAT_TO_UBYTE(cc.AlphaReferenceValueAsUNORM8,
                               ctx->Color.AlphaRef);
#endif
   }

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.ColorCalcStatePointer = brw->cc.state_offset;
#if GEN_GEN >= 8
      ptr.ColorCalcStatePointerValid = true;
#endif
   }
#else
   brw->ctx.NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
#endif
}

static const struct brw_tracked_state genX(color_calc_state) = {
   .dirty = {
      .mesa = _NEW_COLOR |
              _NEW_STENCIL |
              (GEN_GEN <= 5 ? _NEW_BUFFERS |
                              _NEW_MULTISAMPLE
                            : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             (GEN_GEN <= 5 ? BRW_NEW_CC_VP |
                             BRW_NEW_STATS_WM
                           : BRW_NEW_CC_STATE |
                             BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_color_calc_state),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_sbe)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   UNUSED const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
#if GEN_GEN >= 8
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = { { 0 } };
#else
#define attr_overrides sbe.Attribute
#endif
   uint32_t urb_entry_read_length;
   uint32_t urb_entry_read_offset;
   uint32_t point_sprite_enables;

   brw_batch_emit(brw, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* _NEW_BUFFERS */
      bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);

      /* _NEW_POINT
       *
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) != render_to_fbo)
         sbe.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      else
         sbe.PointSpriteTextureCoordinateOrigin = UPPERLEFT;

      /* _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM,
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM |
       * BRW_NEW_GS_PROG_DATA | BRW_NEW_PRIMITIVE | BRW_NEW_TES_PROG_DATA |
       * BRW_NEW_VUE_MAP_GEOM_OUT
       */
      genX(calculate_attr_overrides)(brw,
                                     attr_overrides,
                                     &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);

      /* Typically, the URB entry read length and offset should be programmed
       * in 3DSTATE_VS and 3DSTATE_GS; SBE inherits it from the last active
       * stage which produces geometry.  However, we don't know the proper
       * value until we call calculate_attr_overrides().
       *
       * To fit with our existing code, we override the inherited values and
       * specify it here directly, as we did on previous generations.
       */
      sbe.VertexURBEntryReadLength = urb_entry_read_length;
      sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
      sbe.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;

#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      /* prepare the active component dwords */
      for (int i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
#endif
   }

#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
#endif

#undef attr_overrides
}

static const struct brw_tracked_state genX(sbe_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_PROGRAM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_GS_PROG_DATA |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               (GEN_GEN == 7 ? BRW_NEW_PRIMITIVE
                             : 0),
   },
   .emit = genX(upload_sbe),
};
/* ---------------------------------------------------------------------- */

/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries containing a SO_DECL per
 * stream.  We only have one stream of rendering coming out of the GS unit, so
 * we only emit stream 0 (low 16 bits) SO_DECLs.
 */
static void
genX(upload_3dstate_so_decl_list)(struct brw_context *brw,
                                  const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < linked_xfb_info->NumOutputs; i++) {
      const struct gl_transform_feedback_output *output =
         &linked_xfb_info->Outputs[i];
      const int buffer = output->OutputBuffer;
      const int varying = output->OutputRegister;
      const unsigned stream_id = output->StreamId;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->DstOffset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->OutputBuffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }

      next_offset[buffer] = output->DstOffset + output->NumComponents;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->OutputBuffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->NumComponents) - 1) << output->ComponentOffset,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   uint32_t *dw;
   dw = brw_batch_emitn(brw, GENX(3DSTATE_SO_DECL_LIST), 3 + 2 * max_decls,
                        .StreamtoBufferSelects0 = buffer_mask[0],
                        .StreamtoBufferSelects1 = buffer_mask[1],
                        .StreamtoBufferSelects2 = buffer_mask[2],
                        .StreamtoBufferSelects3 = buffer_mask[3],
                        .NumEntries0 = decls[0],
                        .NumEntries1 = decls[1],
                        .NumEntries2 = decls[2],
                        .NumEntries3 = decls[3]);

   for (int i = 0; i < max_decls; i++) {
      GENX(SO_DECL_ENTRY_pack)(
         brw, dw + 2 + i * 2,
         &(struct GENX(SO_DECL_ENTRY)) {
            .Stream0Decl = so_decl[0][i],
            .Stream1Decl = so_decl[1][i],
            .Stream2Decl = so_decl[2][i],
            .Stream3Decl = so_decl[3][i],
         });
   }
}
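/* Worked example (editorial): a gl_SkipComponents6 between two captured
 * varyings shows up only as a DstOffset jump of 6, so the hole loop above
 * emits two hole SO_DECLs: one with ComponentMask = (1 << 4) - 1 = 0xf and
 * a second with ComponentMask = (1 << 2) - 1 = 0x3.
 */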
static void
genX(upload_3dstate_so_buffers)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
#if GEN_GEN < 8
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
#else
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) xfb_obj;
   uint32_t mocs_wb = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
#endif

   /* Set up the up to 4 output buffers.  These are the ranges defined in the
    * gl_transform_feedback_object.
    */
   for (int i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);

      if (!bufferobj) {
         brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
            sob.SOBufferIndex = i;
         }
         continue;
      }

      uint32_t start = xfb_obj->Offset[i];
      assert(start % 4 == 0);
      uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, bufferobj, start, end - start, true);
      assert(end <= bo->size);

      brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
         sob.SOBufferIndex = i;

         sob.SurfaceBaseAddress = rw_bo(bo, start);
#if GEN_GEN < 8
         sob.SurfacePitch = linked_xfb_info->Buffers[i].Stride * 4;
         sob.SurfaceEndAddress = rw_bo(bo, end);
#else
         sob.SOBufferEnable = true;
         sob.StreamOffsetWriteEnable = true;
         sob.StreamOutputBufferOffsetAddressEnable = true;
         sob.SOBufferMOCS = mocs_wb;

         sob.SurfaceSize = MAX2(xfb_obj->Size[i] / 4, 1) - 1;
         sob.StreamOutputBufferOffsetAddress =
            rw_bo(brw_obj->offset_bo, i * sizeof(uint32_t));

         if (brw_obj->zero_offsets) {
            /* Zero out the offset and write that to offset_bo */
            sob.StreamOffset = 0;
         } else {
            /* Use offset_bo as the "Stream Offset." */
            sob.StreamOffset = 0xFFFFFFFF;
         }
#endif
      }
   }

#if GEN_GEN >= 8
   brw_obj->zero_offsets = false;
#endif
}
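/* Editorial note: StreamOffset = 0xFFFFFFFF above is the sentinel telling
 * the hardware to load the current offset from
 * StreamOutputBufferOffsetAddress instead, so a resumed transform feedback
 * continues where the last batch's write-back into offset_bo left off,
 * while a fresh BeginTransformFeedback (zero_offsets) programs an
 * explicit 0 through the same mechanism.
 */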
static bool
query_active(struct gl_query_object *q)
{
   return q && q->Active;
}

static void
genX(upload_3dstate_streamout)(struct brw_context *brw, bool active,
                               const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   brw_batch_emit(brw, GENX(3DSTATE_STREAMOUT), sos) {
      if (active) {
         int urb_entry_read_offset = 0;
         int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
            urb_entry_read_offset;

         sos.SOFunctionEnable = true;
         sos.SOStatisticsEnable = true;

         /* BRW_NEW_RASTERIZER_DISCARD */
         if (ctx->RasterDiscard) {
            if (!query_active(ctx->Query.PrimitivesGenerated[0])) {
               sos.RenderingDisable = true;
            } else {
               perf_debug("Rasterizer discard with a GL_PRIMITIVES_GENERATED "
                          "query active relies on the clipper.\n");
            }
         }

         /* _NEW_LIGHT */
         if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
            sos.ReorderMode = TRAILING;

#if GEN_GEN < 8
         sos.SOBufferEnable0 = xfb_obj->Buffers[0] != NULL;
         sos.SOBufferEnable1 = xfb_obj->Buffers[1] != NULL;
         sos.SOBufferEnable2 = xfb_obj->Buffers[2] != NULL;
         sos.SOBufferEnable3 = xfb_obj->Buffers[3] != NULL;
#else
         const struct gl_transform_feedback_info *linked_xfb_info =
            xfb_obj->program->sh.LinkedTransformFeedback;
         /* Set buffer pitches; 0 means unbound. */
         if (xfb_obj->Buffers[0])
            sos.Buffer0SurfacePitch = linked_xfb_info->Buffers[0].Stride * 4;
         if (xfb_obj->Buffers[1])
            sos.Buffer1SurfacePitch = linked_xfb_info->Buffers[1].Stride * 4;
         if (xfb_obj->Buffers[2])
            sos.Buffer2SurfacePitch = linked_xfb_info->Buffers[2].Stride * 4;
         if (xfb_obj->Buffers[3])
            sos.Buffer3SurfacePitch = linked_xfb_info->Buffers[3].Stride * 4;
#endif

         /* We always read the whole vertex.  This could be reduced at some
          * point by reading less and offsetting the register index in the
          * SO_DECLs.
          */
         sos.Stream0VertexReadOffset = urb_entry_read_offset;
         sos.Stream0VertexReadLength = urb_entry_read_length - 1;
         sos.Stream1VertexReadOffset = urb_entry_read_offset;
         sos.Stream1VertexReadLength = urb_entry_read_length - 1;
         sos.Stream2VertexReadOffset = urb_entry_read_offset;
         sos.Stream2VertexReadLength = urb_entry_read_length - 1;
         sos.Stream3VertexReadOffset = urb_entry_read_offset;
         sos.Stream3VertexReadLength = urb_entry_read_length - 1;
      }
   }
}
static void
genX(upload_sol)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   bool active = _mesa_is_xfb_active_and_unpaused(ctx);

   if (active) {
      genX(upload_3dstate_so_buffers)(brw);

      /* BRW_NEW_VUE_MAP_GEOM_OUT */
      genX(upload_3dstate_so_decl_list)(brw, &brw->vue_map_geom_out);
   }

   /* Finally, set up the SOL stage.  This command must always follow updates
    * to the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST)
    * or MMIO register updates (currently performed by the kernel at each
    * batch emit).
    */
   genX(upload_3dstate_streamout)(brw, active, &brw->vue_map_geom_out);
}

static const struct brw_tracked_state genX(sol_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_RASTERIZER_DISCARD |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               BRW_NEW_TRANSFORM_FEEDBACK,
   },
   .emit = genX(upload_sol),
};
static void
genX(upload_ps)(struct brw_context *brw)
{
   UNUSED const struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   const struct brw_stage_state *stage_state = &brw->wm.base;

   brw_batch_emit(brw, GENX(3DSTATE_PS), ps) {
      /* Initialize the execution mask with VMask. Otherwise, derivatives are
       * incorrect for subspans where some of the pixels are unlit. We believe
       * the bit just didn't take effect in previous generations.
       */
      ps.VectorMaskEnable = GEN_GEN >= 8;

      ps.SamplerCount =
         DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);

      /* BRW_NEW_FS_PROG_DATA */
      ps.BindingTableEntryCount = prog_data->base.binding_table.size_bytes / 4;

      if (prog_data->base.use_alt_mode)
         ps.FloatingPointMode = Alternate;

#if GEN_IS_HASWELL
      /* Haswell requires the sample mask to be set in this packet as well as
       * in 3DSTATE_SAMPLE_MASK; the values should match.
       */

      /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
      ps.SampleMask = genX(determine_sample_mask(brw));
#endif

      /* 3DSTATE_PS expects the number of threads per PSD, which is always 64
       * for pre-Gen11 and 128 for Gen11+. On Gen11+, if the programmed value
       * is k, it implies 2(k+1) threads. It implicitly scales for different
       * GT levels (which have some number of PSDs).
       *
       * In Gen8 the format is U8-2 whereas in Gen9+ it is U9-1.
       */
#if GEN_GEN >= 9
      ps.MaximumNumberofThreadsPerPSD = 64 - 1;
#elif GEN_GEN >= 8
      ps.MaximumNumberofThreadsPerPSD = 64 - 2;
#else
      ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
#endif
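      /* Worked example (editorial addition, not in the original source): on
       * Gen9 the field is U9-1 (value = threads - 1), so 64 threads per PSD
       * are programmed as 63; on Gen8 it is U8-2 (value = threads - 2), so
       * the same 64 threads are programmed as 62.
       */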
      if (prog_data->base.nr_params > 0 ||
          prog_data->base.ubo_ranges[0].length > 0)
         ps.PushConstantEnable = true;

#if GEN_GEN < 8
      /* From the IVB PRM, volume 2 part 1, page 287:
       * "This bit is inserted in the PS payload header and made available to
       * the DataPort (either via the message header or via header bypass) to
       * indicate that oMask data (one or two phases) is included in Render
       * Target Write messages. If present, the oMask data is used to mask off
       * samples."
       */
      ps.oMaskPresenttoRenderTarget = prog_data->uses_omask;

      /* The hardware wedges if you have this bit set but don't turn on any
       * dual source blend factors.
       *
       * BRW_NEW_FS_PROG_DATA | _NEW_COLOR
       */
      ps.DualSourceBlendEnable = prog_data->dual_src_blend &&
                                 (ctx->Color.BlendEnabled & 1) &&
                                 ctx->Color.Blend[0]._UsesDualSrc;

      /* BRW_NEW_FS_PROG_DATA */
      ps.AttributeEnable = (prog_data->num_varying_inputs != 0);
#endif

      /* From the documentation for this packet:
       * "If the PS kernel does not need the Position XY Offsets to
       *  compute a Position Value, then this field should be programmed
       *  to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       *  to compute a Position XY value, this field should match Position
       *  ZW Interpolation Mode to ensure a consistent position.xyzw
       *  computation."
       *
       * We only require XY sample offsets, so this recommendation doesn't
       * look useful at the moment. We might need it in the future.
       */
      if (prog_data->uses_pos_offset)
         ps.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         ps.PositionXYOffsetSelect = POSOFFSET_NONE;

      ps._8PixelDispatchEnable = prog_data->dispatch_8;
      ps._16PixelDispatchEnable = prog_data->dispatch_16;
      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         prog_data->base.dispatch_grf_start_reg;
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         prog_data->dispatch_grf_start_reg_2;

      ps.KernelStartPointer0 = stage_state->prog_offset;
      ps.KernelStartPointer2 = stage_state->prog_offset +
         prog_data->prog_offset_2;

      if (prog_data->base.total_scratch) {
         ps.ScratchSpaceBasePointer =
            rw_32_bo(stage_state->scratch_bo,
                     ffs(stage_state->per_thread_scratch) - 11);
      }
   }
}
static const struct brw_tracked_state genX(ps_state) = {
   .dirty = {
      .mesa  = _NEW_MULTISAMPLE |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR
                            : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_ps),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_hs_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->tcs.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   /* BRW_NEW_TCS_PROG_DATA */
   struct brw_tcs_prog_data *tcs_prog_data =
      brw_tcs_prog_data(stage_prog_data);

   if (!tcs_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs);
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs) {
         INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);

         hs.InstanceCount = tcs_prog_data->instances - 1;
         hs.IncludeVertexHandles = true;

         hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      }
   }
}
static const struct brw_tracked_state genX(hs_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TCS_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_hs_state),
};
static void
genX(upload_ds_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tes.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(stage_prog_data);
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   if (!tes_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_DS), ds);
   } else {
      assert(GEN_GEN < 11 ||
             vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);

      brw_batch_emit(brw, GENX(3DSTATE_DS), ds) {
         INIT_THREAD_DISPATCH_FIELDS(ds, Patch);

         ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
         ds.ComputeWCoordinateEnable =
            tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
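         /* Editorial note, not in the original source: for the triangular
          * domain the third barycentric coordinate is derivable as
          * w = 1 - u - v, so the hardware can compute it for the TES rather
          * than passing it along; that is what the enable above requests.
          */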
#if GEN_GEN >= 8
         if (vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8)
            ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
         ds.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;
#endif
      }
   }
}
static const struct brw_tracked_state genX(ds_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_ds_state),
};

/* ---------------------------------------------------------------------- */
static void
upload_te_state(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(brw->tes.base.prog_data);

   if (active) {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te) {
         te.Partitioning = tes_prog_data->partitioning;
         te.OutputTopology = tes_prog_data->output_topology;
         te.TEDomain = tes_prog_data->domain;
         te.TEEnable = true;
         te.MaximumTessellationFactorOdd = 63.0;
         te.MaximumTessellationFactorNotOdd = 64.0;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te);
   }
}
static const struct brw_tracked_state genX(te_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = upload_te_state,
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_tes_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tes.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tep = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tes.base.prog_data;
   gen6_upload_push_constants(brw, tep, prog_data, stage_state);
}
static const struct brw_tracked_state genX(tes_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_tes_push_constants),
};
static void
genX(upload_tcs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tcs.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tcp = brw->programs[MESA_SHADER_TESS_CTRL];

   /* BRW_NEW_TCS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tcs.base.prog_data;

   gen6_upload_push_constants(brw, tcp, prog_data, stage_state);
}
static const struct brw_tracked_state genX(tcs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_DEFAULT_TESS_LEVELS |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TCS_PROG_DATA,
   },
   .emit = genX(upload_tcs_push_constants),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_cs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      struct brw_cs_prog_data *cs_prog_data =
         brw_cs_prog_data(brw->cs.base.prog_data);

      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
      brw_upload_cs_push_constants(brw, cp, cs_prog_data, stage_state);
   }
}
const struct brw_tracked_state genX(cs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM |
               BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_push_constants),
};
/**
 * Creates a new CS constant buffer reflecting the current CS program's
 * constants, if needed by the CS program.
 */
static void
genX(upload_cs_pull_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct brw_program *cp =
      (struct brw_program *) brw->programs[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->cs.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &cp->program,
                             stage_state, prog_data);
}
const struct brw_tracked_state genX(cs_pull_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM |
               BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_pull_constants),
};
static void
genX(upload_cs_state)(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(
      brw, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t), 64,
      &offset);

   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1,
         RELOC_WRITE);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);
   /* The MEDIA_VFE_STATE documentation for Gen8+ says:
    *
    * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
    *  the only bits that are changed are scoreboard related: Scoreboard
    *  Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
    *  these scoreboard related states, a MEDIA_STATE_FLUSH is sufficient."
    *
    * Earlier generations say "MI_FLUSH" instead of "stalling PIPE_CONTROL",
    * but MI_FLUSH isn't really a thing, so we assume they meant PIPE_CONTROL.
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);

   brw_batch_emit(brw, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         uint32_t per_thread_scratch_value;

         if (GEN_GEN >= 8) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 11;
         } else if (GEN_IS_HASWELL) {
            /* Haswell's Per Thread Scratch Space is in the range [0, 10]
             * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 12;
         } else {
            /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
             * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
             */
            per_thread_scratch_value =
               stage_state->per_thread_scratch / 1024 - 1;
         }
         vfe.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         vfe.PerThreadScratchSpace = per_thread_scratch_value;
      }
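      /* Worked example (editorial addition, not in the original source):
       * per_thread_scratch is a power of two in bytes, so on Gen8 a value of
       * 2048 gives ffs(2048) - 11 = 12 - 11 = 1, the encoding for 2kB; on
       * Haswell the same 2048 gives ffs(2048) - 12 = 0, which also means 2kB
       * under its [0, 10] = [2kB, 2MB] scale.
       */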
      /* If brw->screen->subslice_total is greater than one, then
       * devinfo->max_cs_threads stores the number of threads per sub-slice;
       * thus we need to multiply that number by the subslice count to get
       * the actual maximum number of threads; the -1 is because the HW
       * has a bias of 1 (it would not make sense to say the maximum number
       * of threads is 0).
       */
      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      vfe.MaximumNumberofThreads = devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN >= 8 ? 2 : 0;
#if GEN_GEN < 11
      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GEN_GEN < 8
      vfe.BypassGatewayControl = BypassingOpenGatewayCloseGatewayprotocol;
#endif

      /* We are uploading duplicated copies of push constant uniforms for each
       * thread. Although the local id data needs to vary per thread, it won't
       * change for other uniform data. Unfortunately this duplication is
       * required for gen7. As of Haswell, this duplication can be avoided,
       * but this older mechanism with duplicated data continues to work.
       *
       * FINISHME: As of Haswell, we could make use of the
       * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length"
       * field to only store one copy of uniform data.
       *
       * FINISHME: Broadwell adds a new alternative "Indirect Payload Storage"
       * which is described in the GPGPU_WALKER command and in the Broadwell
       * PRM Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline => Mode of
       * Operations => GPGPU Mode => Indirect Payload Storage.
       *
       * Note: The constant data is built in brw_upload_cs_push_constants
       * below.
       */
      vfe.URBEntryAllocationSize = GEN_GEN >= 8 ? 2 : 0;

      const uint32_t vfe_curbe_allocation =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
   }
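   /* Worked example (editorial addition, not in the original source): with
    * 4 per-thread push registers, 8 threads, and 2 cross-thread registers,
    * the CURBE needs 4 * 8 + 2 = 34 registers; ALIGN(34, 2) leaves it at 34
    * since the total is already even.
    */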
   if (cs_prog_data->push.total.size > 0) {
      brw_batch_emit(brw, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength =
            ALIGN(cs_prog_data->push.total.size, 64);
         curbe.CURBEDataStartAddress = stage_state->push_const_offset;
      }
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);
   const struct GENX(INTERFACE_DESCRIPTOR_DATA) idd = {
      .KernelStartPointer = brw->cs.base.prog_offset,
      .SamplerStatePointer = stage_state->sampler_offset,
      .SamplerCount = DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4),
      .BindingTablePointer = stage_state->bind_bo_offset,
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
      .SharedLocalMemorySize = encode_slm_size(GEN_GEN,
                                               prog_data->total_shared),
      .BarrierEnable = cs_prog_data->uses_barrier,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif
   };

   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(brw, desc, &idd);

   brw_batch_emit(brw, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress = offset;
   }
}
static const struct brw_tracked_state genX(cs_state) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CS_PROG_DATA |
               BRW_NEW_SAMPLER_STATE_TABLE |
               BRW_NEW_SURFACES,
   },
   .emit = genX(upload_cs_state)
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_raster)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);

   /* _NEW_POLYGON */
   const struct gl_polygon_attrib *polygon = &ctx->Polygon;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   brw_batch_emit(brw, GENX(3DSTATE_RASTER), raster) {
      if (brw->polygon_front_bit == render_to_fbo)
         raster.FrontWinding = CounterClockwise;
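      /* Editorial note, not in the original source: polygon_front_bit tracks
       * glFrontFace, and rendering to the window-system framebuffer flips Y,
       * which inverts screen-space winding; comparing the two values folds
       * both effects into the single check above.
       */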
      if (polygon->CullFlag) {
         switch (polygon->CullFaceMode) {
         case GL_FRONT:
            raster.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            raster.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            raster.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         raster.CullMode = CULLMODE_NONE;
      }

      raster.SmoothPointEnable = point->SmoothFlag;

      raster.DXMultisampleRasterizationEnable =
         _mesa_is_multisample_enabled(ctx);

      raster.GlobalDepthOffsetEnableSolid = polygon->OffsetFill;
      raster.GlobalDepthOffsetEnableWireframe = polygon->OffsetLine;
      raster.GlobalDepthOffsetEnablePoint = polygon->OffsetPoint;

      switch (polygon->FrontMode) {
      case GL_FILL:
         raster.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (polygon->BackMode) {
      case GL_FILL:
         raster.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      /* _NEW_LINE */
      raster.AntialiasingEnable = ctx->Line.SmoothFlag;

#if GEN_GEN == 10
      /* _NEW_BUFFERS
       * Antialiasing Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
       */
      const bool multisampled_fbo =
         _mesa_geometric_samples(ctx->DrawBuffer) > 1;
      if (multisampled_fbo)
         raster.AntialiasingEnable = false;
#endif

      /* _NEW_SCISSOR */
      raster.ScissorRectangleEnable = ctx->Scissor.EnableFlags;

      /* _NEW_TRANSFORM */
      if (!ctx->Transform.DepthClamp) {
#if GEN_GEN >= 9
         raster.ViewportZFarClipTestEnable = true;
         raster.ViewportZNearClipTestEnable = true;
#else
         raster.ViewportZClipTestEnable = true;
#endif
      }

      /* BRW_NEW_CONSERVATIVE_RASTERIZATION */
#if GEN_GEN >= 9
      raster.ConservativeRasterizationEnable =
         ctx->IntelConservativeRasterization;
#endif

      raster.GlobalDepthOffsetClamp = polygon->OffsetClamp;
      raster.GlobalDepthOffsetScale = polygon->OffsetFactor;

#if GEN_GEN >= 9
      raster.GlobalDepthOffsetConstant = polygon->OffsetUnits * 2;
#endif
   }
}
static const struct brw_tracked_state genX(raster_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LINE |
               _NEW_MULTISAMPLE |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_SCISSOR |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_raster),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_ps_extra)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;

   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_PS_EXTRA), psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = prog_data->uses_kill;
      psx.AttributeEnable = prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = prog_data->persample_dispatch;

      /* _NEW_MULTISAMPLE | BRW_NEW_CONSERVATIVE_RASTERIZATION */
      if (prog_data->uses_sample_mask) {
#if GEN_GEN >= 9
         if (prog_data->post_depth_coverage)
            psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
         else if (prog_data->inner_coverage &&
                  ctx->IntelConservativeRasterization)
            psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
         else
            psx.InputCoverageMaskState = ICMS_NORMAL;
#else
         psx.PixelShaderUsesInputCoverageMask = true;
#endif
      }

      psx.oMaskPresenttoRenderTarget = prog_data->uses_omask;
#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = prog_data->computed_stencil;
#endif

      /* The stricter cross-primitive coherency guarantees that the hardware
       * gives us with the "Accesses UAV" bit set for at least one shader stage
       * and the "UAV coherency required" bit set on the 3DPRIMITIVE command
       * are redundant within the current image, atomic counter and SSBO GL
       * APIs, which all have very loose ordering and coherency requirements
       * and generally rely on the application to insert explicit barriers when
       * a shader invocation is expected to see the memory writes performed by
       * the invocations of some previous primitive. Regardless of the value
       * of "UAV coherency required", the "Accesses UAV" bits will implicitly
       * cause a DC flush (useless in most cases) when the lowermost stage
       * with the bit set finishes execution.
       *
       * It would be nice to disable it, but in some cases we can't because on
       * Gen8+ it also has an influence on rasterization via the PS UAV-only
       * signal (which could be set independently from the coherency mechanism
       * in the 3DSTATE_WM command on Gen7), and because in some cases it will
       * determine whether the hardware skips execution of the fragment shader
       * or not via the ThreadDispatchEnable signal. However if we know that
       * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
       * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not set it shouldn't make any
       * difference so we may just disable it here.
       *
       * Gen8 hardware tries to compute ThreadDispatchEnable for us but doesn't
       * take into account KillPixels when no depth or stencil writes are
       * enabled. In order for occlusion queries to work correctly with no
       * attachments, we need to force-enable here.
       *
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS |
       * _NEW_COLOR
       */
      if ((prog_data->has_side_effects || prog_data->uses_kill) &&
          !brw_color_buffer_write_enabled(brw))
         psx.PixelShaderHasUAV = true;
   }
}
const struct brw_tracked_state genX(ps_extra) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS | _NEW_COLOR,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_ps_extra),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_ps_blend)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   const bool buffer0_is_integer = ctx->DrawBuffer->_IntegerBuffers & 0x1;

   /* _NEW_COLOR */
   struct gl_colorbuffer_attrib *color = &ctx->Color;

   brw_batch_emit(brw, GENX(3DSTATE_PS_BLEND), pb) {
      /* BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS | _NEW_COLOR */
      pb.HasWriteableRT = brw_color_buffer_write_enabled(brw);

      bool alpha_to_one = false;

      if (!buffer0_is_integer) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            pb.AlphaToCoverageEnable = ctx->Multisample.SampleAlphaToCoverage;
            alpha_to_one = ctx->Multisample.SampleAlphaToOne;
         }

         pb.AlphaTestEnable = color->AlphaEnabled;
      }

      /* Used for implementing the following bit of GL_EXT_texture_integer:
       * "Per-fragment operations that require floating-point color
       *  components, including multisample alpha operations, alpha test,
       *  blending, and dithering, have no effect when the corresponding
       *  colors are written to an integer color buffer."
       *
       * The OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       *  integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       *  operations are skipped."
       */
      if (rb && !buffer0_is_integer && (color->BlendEnabled & 1)) {
         GLenum eqRGB = color->Blend[0].EquationRGB;
         GLenum eqA = color->Blend[0].EquationA;
         GLenum srcRGB = color->Blend[0].SrcRGB;
         GLenum dstRGB = color->Blend[0].DstRGB;
         GLenum srcA = color->Blend[0].SrcA;
         GLenum dstA = color->Blend[0].DstA;

         if (eqRGB == GL_MIN || eqRGB == GL_MAX)
            srcRGB = dstRGB = GL_ONE;

         if (eqA == GL_MIN || eqA == GL_MAX)
            srcA = dstA = GL_ONE;

         /* Due to hardware limitations, the destination may have information
          * in an alpha channel even when the format specifies no alpha
          * channel. In order to avoid getting any incorrect blending due to
          * that alpha channel, coerce the blend factors to values that will
          * not read the alpha channel, but will instead use the correct
          * implicit value for alpha.
          */
         if (!_mesa_base_format_has_channel(rb->_BaseFormat,
                                            GL_TEXTURE_ALPHA_TYPE)) {
            srcRGB = brw_fix_xRGB_alpha(srcRGB);
            srcA = brw_fix_xRGB_alpha(srcA);
            dstRGB = brw_fix_xRGB_alpha(dstRGB);
            dstA = brw_fix_xRGB_alpha(dstA);
         }

         /* Alpha to One doesn't work with Dual Color Blending. Override
          * SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO.
          */
         if (alpha_to_one && color->Blend[0]._UsesDualSrc) {
            srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
            srcA = fix_dual_blend_alpha_to_one(srcA);
            dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
            dstA = fix_dual_blend_alpha_to_one(dstA);
         }

         pb.ColorBufferBlendEnable = true;
         pb.SourceAlphaBlendFactor = brw_translate_blend_factor(srcA);
         pb.DestinationAlphaBlendFactor = brw_translate_blend_factor(dstA);
         pb.SourceBlendFactor = brw_translate_blend_factor(srcRGB);
         pb.DestinationBlendFactor = brw_translate_blend_factor(dstRGB);

         pb.IndependentAlphaBlendEnable =
            srcA != srcRGB || dstA != dstRGB || eqA != eqRGB;
      }
   }
}
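/* Editorial example, not in the original source: for an xRGB (alpha-less)
 * render target the implicit destination alpha is 1.0, so a factor such as
 * GL_DST_ALPHA must behave as GL_ONE and GL_ONE_MINUS_DST_ALPHA as GL_ZERO;
 * brw_fix_xRGB_alpha performs that substitution on the factors above.
 */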
static const struct brw_tracked_state genX(ps_blend) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_COLOR |
               _NEW_MULTISAMPLE,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_ps_blend)
};

/* ---------------------------------------------------------------------- */
static void
genX(emit_vf_topology)(struct brw_context *brw)
{
   brw_batch_emit(brw, GENX(3DSTATE_VF_TOPOLOGY), vftopo) {
      vftopo.PrimitiveTopologyType = brw->primitive;
   }
}

static const struct brw_tracked_state genX(vf_topology) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_PRIMITIVE,
   },
   .emit = genX(emit_vf_topology),
};

/* ---------------------------------------------------------------------- */
void
genX(emit_mi_report_perf_count)(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t offset_in_bytes,
                                uint32_t report_id)
{
   brw_batch_emit(brw, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = ggtt_bo(bo, offset_in_bytes);
      mi_rpc.ReportID = report_id;
   }
}

/* ---------------------------------------------------------------------- */
/**
 * Emit a 3DSTATE_SAMPLER_STATE_POINTERS_{VS,HS,GS,DS,PS} packet.
 */
static void
genX(emit_sampler_state_pointers_xs)(MAYBE_UNUSED struct brw_context *brw,
                                     MAYBE_UNUSED
                                     struct brw_stage_state *stage_state)
{
#if GEN_GEN >= 7
   static const uint16_t packet_headers[] = {
      [MESA_SHADER_VERTEX] = 43,
      [MESA_SHADER_TESS_CTRL] = 44,
      [MESA_SHADER_TESS_EVAL] = 45,
      [MESA_SHADER_GEOMETRY] = 46,
      [MESA_SHADER_FRAGMENT] = 47,
   };

   /* Ivybridge requires a workaround flush before VS packets. */
   if (GEN_GEN == 7 && !GEN_IS_HASWELL &&
       stage_state->stage == MESA_SHADER_VERTEX) {
      gen7_emit_vs_workaround_flush(brw);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
      ptr._3DCommandSubOpcode = packet_headers[stage_state->stage];
      ptr.PointertoVSSamplerState = stage_state->sampler_offset;
   }
#endif
}
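/* Editorial note, not in the original source: the VS variant of the packet
 * is emitted for every stage and its sub-opcode is then overwritten; e.g.
 * stage_state->stage == MESA_SHADER_FRAGMENT patches in sub-opcode 47,
 * turning the packet into 3DSTATE_SAMPLER_STATE_POINTERS_PS.
 */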
static bool
has_component(mesa_format format, int i)
{
   if (_mesa_is_format_color_format(format))
      return _mesa_format_has_color_component(format, i);

   /* Depth and stencil have only one component. */
   return i == 0;
}
/**
 * Upload SAMPLER_BORDER_COLOR_STATE.
 */
static void
genX(upload_default_color)(struct brw_context *brw,
                           const struct gl_sampler_object *sampler,
                           MAYBE_UNUSED mesa_format format, GLenum base_format,
                           bool is_integer_format, bool is_stencil_sampling,
                           uint32_t *sdc_offset)
{
   union gl_color_union color;

   switch (base_format) {
   case GL_DEPTH_COMPONENT:
      /* GL specs that border color for depth textures is taken from the
       * R channel, while the hardware uses A. Spam R into all the
       * channels for safety.
       */
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_ALPHA:
      color.ui[0] = 0;
      color.ui[1] = 0;
      color.ui[2] = 0;
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   case GL_INTENSITY:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_LUMINANCE:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = float_as_int(1.0);
      break;
   case GL_LUMINANCE_ALPHA:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   default:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[1];
      color.ui[2] = sampler->BorderColor.ui[2];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   }

   /* In some cases we use an RGBA surface format for GL RGB textures,
    * where we've initialized the A channel to 1.0. We also have to set
    * the border color alpha to 1.0 in that case.
    */
   if (base_format == GL_RGB)
      color.ui[3] = float_as_int(1.0);

   int alignment = 32;
   if (GEN_GEN >= 8) {
      alignment = 64;
   } else if (GEN_IS_HASWELL && (is_integer_format || is_stencil_sampling)) {
      alignment = 512;
   }

   uint32_t *sdc = brw_state_batch(
      brw, GENX(SAMPLER_BORDER_COLOR_STATE_length) * sizeof(uint32_t),
      alignment, sdc_offset);

   struct GENX(SAMPLER_BORDER_COLOR_STATE) state = { 0 };

#define ASSIGN(dst, src) \
   do {                  \
      dst = src;         \
   } while (0)

#define ASSIGNu16(dst, src) \
   do {                     \
      dst = (uint16_t)src;  \
   } while (0)

#define ASSIGNu8(dst, src) \
   do {                    \
      dst = (uint8_t)src;  \
   } while (0)

#define BORDER_COLOR_ATTR(macro, _color_type, src)           \
   macro(state.BorderColor ## _color_type ## Red, src[0]);   \
   macro(state.BorderColor ## _color_type ## Green, src[1]); \
   macro(state.BorderColor ## _color_type ## Blue, src[2]);  \
   macro(state.BorderColor ## _color_type ## Alpha, src[3]);

#if GEN_GEN >= 8
   /* On Broadwell, the border color is represented as four 32-bit floats,
    * integers, or unsigned values, interpreted according to the surface
    * format. This matches the sampler->BorderColor union exactly; just
    * memcpy the values.
    */
   BORDER_COLOR_ATTR(ASSIGN, 32bit, color.ui);
#elif GEN_IS_HASWELL
   if (is_integer_format || is_stencil_sampling) {
      bool stencil = format == MESA_FORMAT_S_UINT8 || is_stencil_sampling;
      const int bits_per_channel =
         _mesa_get_format_bits(format, stencil ? GL_STENCIL_BITS : GL_RED_BITS);

      /* From the Haswell PRM, "Command Reference: Structures", Page 36:
       * "If any color channel is missing from the surface format,
       *  corresponding border color should be programmed as zero and if
       *  alpha channel is missing, corresponding Alpha border color should
       *  be programmed as 1."
       */
      unsigned c[4] = { 0, 0, 0, 1 };
      for (int i = 0; i < 4; i++) {
         if (has_component(format, i))
            c[i] = color.ui[i];
      }
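      /* Editorial example, not in the original source: for a two-channel
       * integer format such as RG16UI, has_component() is true only for R
       * and G, so c becomes { R, G, 0, 1 }, matching the PRM rule quoted
       * above.
       */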
      switch (bits_per_channel) {
      case 8:
         /* Copy RGBA in order. */
         BORDER_COLOR_ATTR(ASSIGNu8, 8bit, c);
         break;
      case 10:
         /* R10G10B10A2_UINT is treated like a 16-bit format. */
      case 16:
         BORDER_COLOR_ATTR(ASSIGNu16, 16bit, c);
         break;
      case 32:
         if (base_format == GL_RG) {
            /* Careful inspection of the tables reveals that for RG32 formats,
             * the green channel needs to go where blue normally belongs.
             */
            state.BorderColor32bitRed = c[0];
            state.BorderColor32bitBlue = c[1];
            state.BorderColor32bitAlpha = 1;
         } else {
            /* Copy RGBA in order. */
            BORDER_COLOR_ATTR(ASSIGN, 32bit, c);
         }
         break;
      default:
         assert(!"Invalid number of bits per channel in integer format.");
         break;
      }
   } else {
      BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
   }
#elif GEN_GEN == 5 || GEN_GEN == 6
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_UBYTE, Unorm, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_USHORT, Unorm16, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_SHORT, Snorm16, color.f);

#define MESA_FLOAT_TO_HALF(dst, src) \
   dst = _mesa_float_to_half(src);

   BORDER_COLOR_ATTR(MESA_FLOAT_TO_HALF, Float16, color.f);

#undef MESA_FLOAT_TO_HALF

   state.BorderColorSnorm8Red   = state.BorderColorSnorm16Red >> 8;
   state.BorderColorSnorm8Green = state.BorderColorSnorm16Green >> 8;
   state.BorderColorSnorm8Blue  = state.BorderColorSnorm16Blue >> 8;
   state.BorderColorSnorm8Alpha = state.BorderColorSnorm16Alpha >> 8;

   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#elif GEN_GEN == 4
   BORDER_COLOR_ATTR(ASSIGN, , color.f);
#else
   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#endif

#undef ASSIGN
#undef BORDER_COLOR_ATTR

   GENX(SAMPLER_BORDER_COLOR_STATE_pack)(brw, sdc, &state);
}
static uint32_t
translate_wrap_mode(GLenum wrap, MAYBE_UNUSED bool using_nearest)
{
   switch (wrap) {
   case GL_REPEAT:
      return TCM_WRAP;
   case GL_CLAMP:
#if GEN_GEN >= 8
      /* GL_CLAMP is the weird mode where coordinates are clamped to
       * [0.0, 1.0], so linear filtering of coordinates outside of
       * [0.0, 1.0] gives you half edge texel value and half border
       * color.
       *
       * Gen8+ supports this natively.
       */
      return TCM_HALF_BORDER;
#else
      /* On Gen4-7.5, we clamp the coordinates in the fragment shader
       * and set clamp_border here, which gets the result desired.
       * We just use clamp(_to_edge) for nearest, because for nearest
       * clamping to 1.0 gives border color instead of the desired
       * edge texel.
       */
      if (using_nearest)
         return TCM_CLAMP;
      else
         return TCM_CLAMP_BORDER;
#endif
   case GL_CLAMP_TO_EDGE:
      return TCM_CLAMP;
   case GL_CLAMP_TO_BORDER:
      return TCM_CLAMP_BORDER;
   case GL_MIRRORED_REPEAT:
      return TCM_MIRROR;
   case GL_MIRROR_CLAMP_TO_EDGE:
      return TCM_MIRROR_ONCE;
   default:
      return TCM_WRAP;
   }
}
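/* Editorial example, not in the original source: with GL_CLAMP and linear
 * filtering on pre-Gen8, a coordinate of 1.2 clamps to 1.0 and the fetch
 * blends the edge texel with the border color, hence TCM_CLAMP_BORDER plus
 * the shader-side clamp; with nearest filtering the same coordinate should
 * return only the edge texel, so TCM_CLAMP is used instead.
 */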
/**
 * Return true if the given wrap mode requires the border color to exist.
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
#if GEN_GEN >= 8
   return wrap_mode == TCM_CLAMP_BORDER ||
          wrap_mode == TCM_HALF_BORDER;
#else
   return wrap_mode == TCM_CLAMP_BORDER;
#endif
}
/**
 * Sets the sampler state for a single unit based off of the sampler key
 * entry.
 */
static void
genX(update_sampler_state)(struct brw_context *brw,
                           GLenum target, bool tex_cube_map_seamless,
                           GLfloat tex_unit_lod_bias,
                           mesa_format format, GLenum base_format,
                           const struct gl_texture_object *texObj,
                           const struct gl_sampler_object *sampler,
                           uint32_t *sampler_state)
{
   struct GENX(SAMPLER_STATE) samp_st = { 0 };

   /* Select min and mip filters. */
   switch (sampler->MinFilter) {
   case GL_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_NEAREST_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_LINEAR_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_NEAREST_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   case GL_LINEAR_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   default:
      unreachable("not reached");
   }

   /* Select mag filter. */
   samp_st.MagModeFilter = sampler->MagFilter == GL_LINEAR ?
      MAPFILTER_LINEAR : MAPFILTER_NEAREST;

   /* Enable anisotropic filtering if desired. */
   samp_st.MaximumAnisotropy = RATIO21;

   if (sampler->MaxAnisotropy > 1.0f) {
      if (samp_st.MinModeFilter == MAPFILTER_LINEAR)
         samp_st.MinModeFilter = MAPFILTER_ANISOTROPIC;
      if (samp_st.MagModeFilter == MAPFILTER_LINEAR)
         samp_st.MagModeFilter = MAPFILTER_ANISOTROPIC;

      if (sampler->MaxAnisotropy > 2.0f) {
         samp_st.MaximumAnisotropy =
            MIN2((sampler->MaxAnisotropy - 2) / 2, RATIO161);
      }
   }
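   /* Editorial example, not in the original source: the hardware ratio
    * encoding starts at RATIO21 (2:1) and advances one step per two units
    * of anisotropy, so a GL max-anisotropy of 16 maps to (16 - 2) / 2 = 7,
    * i.e. RATIO161 (16:1), which is also the clamping maximum.
    */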
   /* Set address rounding bits if not using nearest filtering. */
   if (samp_st.MinModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMinFilterRoundingEnable = true;
      samp_st.VAddressMinFilterRoundingEnable = true;
      samp_st.RAddressMinFilterRoundingEnable = true;
   }

   if (samp_st.MagModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMagFilterRoundingEnable = true;
      samp_st.VAddressMagFilterRoundingEnable = true;
      samp_st.RAddressMagFilterRoundingEnable = true;
   }

   bool either_nearest =
      sampler->MinFilter == GL_NEAREST || sampler->MagFilter == GL_NEAREST;
   unsigned wrap_s = translate_wrap_mode(sampler->WrapS, either_nearest);
   unsigned wrap_t = translate_wrap_mode(sampler->WrapT, either_nearest);
   unsigned wrap_r = translate_wrap_mode(sampler->WrapR, either_nearest);

   if (target == GL_TEXTURE_CUBE_MAP ||
       target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      /* Cube maps must use the same wrap mode for all three coordinate
       * dimensions. Prior to Haswell, only CUBE and CLAMP are valid.
       *
       * Ivybridge and Baytrail seem to have problems with CUBE mode and
       * integer formats. Fall back to CLAMP for now.
       */
      if ((tex_cube_map_seamless || sampler->CubeMapSeamless) &&
          !(GEN_GEN == 7 && !GEN_IS_HASWELL && texObj->_IsIntegerFormat)) {
         wrap_s = TCM_CUBE;
         wrap_t = TCM_CUBE;
         wrap_r = TCM_CUBE;
      } else {
         wrap_s = TCM_CLAMP;
         wrap_t = TCM_CLAMP;
         wrap_r = TCM_CLAMP;
      }
   } else if (target == GL_TEXTURE_1D) {
      /* There's a bug in 1D texture sampling - it actually pays
       * attention to the wrap_t value, though it should not.
       * Override the wrap_t value here to GL_REPEAT to keep
       * any nonexistent border pixels from floating in.
       */
      wrap_t = TCM_WRAP;
   }

   samp_st.TCXAddressControlMode = wrap_s;
   samp_st.TCYAddressControlMode = wrap_t;
   samp_st.TCZAddressControlMode = wrap_r;

   samp_st.ShadowFunction =
      sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB ?
      intel_translate_shadow_compare_func(sampler->CompareFunc) : 0;

#if GEN_GEN >= 7
   /* Select the anisotropic filtering algorithm. */
   samp_st.AnisotropicAlgorithm =
      samp_st.MinModeFilter == MAPFILTER_ANISOTROPIC ?
      EWAApproximation : LEGACY;
#endif

#if GEN_GEN >= 6
   samp_st.NonnormalizedCoordinateEnable = target == GL_TEXTURE_RECTANGLE;
#endif

   const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
   samp_st.MinLOD = CLAMP(sampler->MinLod, 0, hw_max_lod);
   samp_st.MaxLOD = CLAMP(sampler->MaxLod, 0, hw_max_lod);
   samp_st.TextureLODBias =
      CLAMP(tex_unit_lod_bias + sampler->LodBias, -16, 15);

#if GEN_GEN == 6
   samp_st.BaseMipLevel =
      CLAMP(texObj->MinLevel + texObj->BaseLevel, 0, hw_max_lod);
   samp_st.MinandMagStateNotEqual =
      samp_st.MinModeFilter != samp_st.MagModeFilter;
#endif

   /* Upload the border color if necessary. If not, just point it at
    * offset 0 (the start of the batch) - the color should be ignored,
    * but that address won't fault in case something reads it anyway.
    */
   uint32_t border_color_offset = 0;
   if (wrap_mode_needs_border_color(wrap_s) ||
       wrap_mode_needs_border_color(wrap_t) ||
       wrap_mode_needs_border_color(wrap_r)) {
      genX(upload_default_color)(brw, sampler, format, base_format,
                                 texObj->_IsIntegerFormat,
                                 texObj->StencilSampling,
                                 &border_color_offset);
   }
#if GEN_GEN < 6
   samp_st.BorderColorPointer =
      ro_bo(brw->batch.state.bo, border_color_offset);
#else
   samp_st.BorderColorPointer = border_color_offset;
#endif

#if GEN_GEN >= 8
   samp_st.LODPreClampMode = CLAMP_MODE_OGL;
#else
   samp_st.LODPreClampEnable = true;
#endif

   GENX(SAMPLER_STATE_pack)(brw, sampler_state, &samp_st);
}
static void
update_sampler_state(struct brw_context *brw,
                     int unit,
                     uint32_t *sampler_state)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   const struct gl_texture_object *texObj = texUnit->_Current;
   const struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   /* These don't use samplers at all. */
   if (texObj->Target == GL_TEXTURE_BUFFER)
      return;

   struct gl_texture_image *firstImage = texObj->Image[0][texObj->BaseLevel];
   genX(update_sampler_state)(brw, texObj->Target,
                              ctx->Texture.CubeMapSeamless,
                              texUnit->LodBias,
                              firstImage->TexFormat, firstImage->_BaseFormat,
                              texObj, sampler,
                              sampler_state);
}
static void
genX(upload_sampler_state_table)(struct brw_context *brw,
                                 struct gl_program *prog,
                                 struct brw_stage_state *stage_state)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t sampler_count = stage_state->sampler_count;

   GLbitfield SamplersUsed = prog->SamplersUsed;

   if (sampler_count == 0)
      return;

   /* SAMPLER_STATE is 4 DWords on all platforms. */
   const int dwords = GENX(SAMPLER_STATE_length);
   const int size_in_bytes = dwords * sizeof(uint32_t);

   uint32_t *sampler_state = brw_state_batch(brw,
                                             sampler_count * size_in_bytes,
                                             32, &stage_state->sampler_offset);
   /* memset(sampler_state, 0, sampler_count * size_in_bytes); */

   for (unsigned s = 0; s < sampler_count; s++) {
      if (SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         if (ctx->Texture.Unit[unit]._Current) {
            update_sampler_state(brw, unit, sampler_state);
         }
      }

      sampler_state += dwords;
   }

   if (GEN_GEN >= 7 && stage_state->stage != MESA_SHADER_COMPUTE) {
      /* Emit a 3DSTATE_SAMPLER_STATE_POINTERS_XS packet. */
      genX(emit_sampler_state_pointers_xs)(brw, stage_state);
   } else {
      /* Flag that the sampler state table pointer has changed; later atoms
       * will handle it.
       */
      brw->ctx.NewDriverState |= BRW_NEW_SAMPLER_STATE_TABLE;
   }
}
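/* Editorial example, not in the original source: with SamplersUsed = 0b101,
 * entries 0 and 2 are filled from their GL texture units while entry 1 is
 * skipped, yet sampler_state still advances by four DWords every iteration,
 * so entry s always sits at a fixed offset in the table.
 */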
static void
genX(upload_fs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
   genX(upload_sampler_state_table)(brw, fs, &brw->wm.base);
}

static const struct brw_tracked_state genX(fs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_fs_samplers),
};
static void
genX(upload_vs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
   genX(upload_sampler_state_table)(brw, vs, &brw->vs.base);
}

static const struct brw_tracked_state genX(vs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VERTEX_PROGRAM,
   },
   .emit = genX(upload_vs_samplers),
};
static void
genX(upload_gs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
   if (!gs)
      return;

   genX(upload_sampler_state_table)(brw, gs, &brw->gs.base);
}

static const struct brw_tracked_state genX(gs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM,
   },
   .emit = genX(upload_gs_samplers),
};
static void
genX(upload_tcs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   if (!tcs)
      return;

   genX(upload_sampler_state_table)(brw, tcs, &brw->tcs.base);
}

static const struct brw_tracked_state genX(tcs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tcs_samplers),
};
static void
genX(upload_tes_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
   if (!tes)
      return;

   genX(upload_sampler_state_table)(brw, tes, &brw->tes.base);
}

static const struct brw_tracked_state genX(tes_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tes_samplers),
};
static void
genX(upload_cs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
   if (!cs)
      return;

   genX(upload_sampler_state_table)(brw, cs, &brw->cs.base);
}

const struct brw_tracked_state genX(cs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = genX(upload_cs_samplers),
};

/* ---------------------------------------------------------------------- */
static void genX(upload_blend_constant_color)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_COLOR), blend_cc) {
      blend_cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      blend_cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      blend_cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      blend_cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];
   }
}

static const struct brw_tracked_state genX(blend_constant_color) = {
   .dirty = {
      .mesa  = _NEW_COLOR,
      .brw   = BRW_NEW_CONTEXT |
               BRW_NEW_BLORP,
   },
   .emit = genX(upload_blend_constant_color)
};

/* ---------------------------------------------------------------------- */
void
genX(init_atoms)(struct brw_context *brw)
{
#if GEN_GEN < 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Once all the programs are done, we know how large urb entry
       * sizes need to be and can decide if we need to change the urb
       * layout.
       */
      &brw_recalculate_urb_fence,

      &genX(color_calc_state),

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_wm_pull_constants,
      &brw_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_wm_binding_table,

      /* These set up state for brw_psp_urb_cbs */
      &genX(sf_clip_viewport),

      &genX(vs_state), /* always required, enabled or not */

      &brw_binding_table_pointers,
      &genX(blend_constant_color),

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),
      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),

      &brw_constant_buffer
   };
#elif GEN_GEN == 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(sf_clip_viewport),

      /* Command packets: */

      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_state */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &brw_vs_binding_table,
      &gen6_gs_binding_table,
      &brw_wm_binding_table,

      &gen6_sampler_state,
      &genX(multisample_state),

      &genX(scissor_state),

      &gen6_binding_table_pointers,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
   };
#elif GEN_GEN == 7
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Command packets: */

      &genX(sf_clip_viewport),

      &gen7_push_constant_space,

      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(tcs_samplers),
      &genX(tes_samplers),

      &genX(multisample_state),

      &genX(scissor_state),

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
   };
#elif GEN_GEN >= 8
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(sf_clip_viewport),

      &gen7_push_constant_space,

      &genX(color_calc_state),

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(tcs_samplers),
      &genX(tes_samplers),

      &genX(multisample_state),

      &genX(raster_state),

      &genX(depth_stencil_state),

      &genX(scissor_state),

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &genX(index_buffer),
   };
#endif

   STATIC_ASSERT(ARRAY_SIZE(render_atoms) <= ARRAY_SIZE(brw->render_atoms));
   brw_copy_pipeline_atoms(brw, BRW_RENDER_PIPELINE,
                           render_atoms, ARRAY_SIZE(render_atoms));

#if GEN_GEN >= 7
   static const struct brw_tracked_state *compute_atoms[] =
   {
      &brw_cs_image_surfaces,
      &genX(cs_push_constants),
      &genX(cs_pull_constants),
      &brw_cs_ubo_surfaces,
      &brw_cs_texture_surfaces,
      &brw_cs_work_groups_surface,
      &genX(cs_samplers),
      &genX(cs_state),
   };

   STATIC_ASSERT(ARRAY_SIZE(compute_atoms) <= ARRAY_SIZE(brw->compute_atoms));
   brw_copy_pipeline_atoms(brw, BRW_COMPUTE_PIPELINE,
                           compute_atoms, ARRAY_SIZE(compute_atoms));
#endif

   brw->vtbl.emit_mi_report_perf_count = genX(emit_mi_report_perf_count);
}