/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common/gen_device_info.h"
#include "common/gen_sample_positions.h"
#include "genxml/gen_macros.h"

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/state.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_multisample_state.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/glformats.h"
#include "main/samplerobj.h"
#include "main/shaderapi.h"
#include "main/stencil.h"
#include "main/transformfeedback.h"
#include "main/varray.h"
#include "main/viewport.h"
#include "util/half_float.h"
static uint32_t *
emit_dwords(struct brw_context *brw, unsigned n)
{
   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}
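/* A minimal usage sketch (illustrative only, not taken from the driver):
 * reserve space and write raw dwords, assuming MI_NOOP encodes as an
 * all-zero dword (true on these gens):
 *
 *    uint32_t *dw = emit_dwords(brw, 2);
 *    dw[0] = 0;   // MI_NOOP
 *    dw[1] = 0;   // MI_NOOP
 *
 * The brw_batch_emit*() macros below are the intended callers; they pair
 * every emit_dwords() reservation with a genxml _pack() call.
 */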
struct brw_address {
   struct brw_bo *bo;
   uint32_t read_domains;
   uint32_t write_domain;
   uint32_t offset;
};

static uint64_t
emit_reloc(struct brw_context *brw,
           void *location, struct brw_address address, uint32_t delta)
{
   uint32_t offset = (char *) location - (char *) brw->batch.map;

   return brw_emit_reloc(&brw->batch, offset, address.bo,
                         address.offset + delta,
                         address.read_domains,
                         address.write_domain);
}
#define __gen_address_type struct brw_address
#define __gen_user_data struct brw_context

static uint64_t
__gen_combine_address(struct brw_context *brw, void *location,
                      struct brw_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      return emit_reloc(brw, location, address, delta);
   }
}
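/* The genxml-generated _pack() functions (pulled in via genX_pack.h below)
 * call __gen_combine_address() for every address-typed field.  A NULL bo
 * means the "address" is really a plain offset (e.g. into state memory), so
 * it is returned unchanged; otherwise a relocation is emitted and the
 * presumed offset comes back.  This is why the helpers below can describe
 * both relocated buffers and bare offsets with the same struct brw_address.
 */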
static inline struct brw_address
render_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
            .bo = bo,
            .offset = offset,
            .read_domains = I915_GEM_DOMAIN_RENDER,
            .write_domain = I915_GEM_DOMAIN_RENDER,
   };
}

static inline struct brw_address
render_ro_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
            .bo = bo,
            .offset = offset,
            .read_domains = I915_GEM_DOMAIN_RENDER,
   };
}

static inline struct brw_address
instruction_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
            .bo = bo,
            .offset = offset,
            .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
            .write_domain = I915_GEM_DOMAIN_INSTRUCTION,
   };
}

static inline struct brw_address
instruction_ro_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
            .bo = bo,
            .offset = offset,
            .read_domains = I915_GEM_DOMAIN_INSTRUCTION,
   };
}

static inline struct brw_address
vertex_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
            .bo = bo,
            .offset = offset,
            .read_domains = I915_GEM_DOMAIN_VERTEX,
   };
}
#if GEN_GEN == 4
static inline struct brw_address
KSP(struct brw_context *brw, uint32_t offset)
{
   return instruction_bo(brw->cache.bo, offset);
}

static inline struct brw_address
KSP_ro(struct brw_context *brw, uint32_t offset)
{
   return instruction_ro_bo(brw->cache.bo, offset);
}
#else
static inline uint32_t
KSP(struct brw_context *brw, uint32_t offset)
{
   return offset;
}

#define KSP_ro KSP
#endif
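/* Two definitions of KSP() with different return types can only coexist
 * under conditional compilation, hence the guards above.  On Gen4 a kernel
 * start pointer is a relocation into the program cache BO; on newer
 * hardware the driver can rely on the instruction base address set up by
 * STATE_BASE_ADDRESS, so a plain offset into the cache is enough and the
 * read-only variant degenerates to KSP itself.
 */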
#include "genxml/genX_pack.h"

#define _brw_cmd_length(cmd) cmd ## _length
#define _brw_cmd_length_bias(cmd) cmd ## _length_bias
#define _brw_cmd_header(cmd) cmd ## _header
#define _brw_cmd_pack(cmd) cmd ## _pack

#define brw_batch_emit(brw, cmd, name)                  \
   for (struct cmd name = { _brw_cmd_header(cmd) },     \
        *_dst = emit_dwords(brw, _brw_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);              \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),   \
        _dst = NULL)

#define brw_batch_emitn(brw, cmd, n, ...) ({           \
      uint32_t *_dw = emit_dwords(brw, n);             \
      struct cmd template = {                          \
         _brw_cmd_header(cmd),                         \
         .DWordLength = n - _brw_cmd_length_bias(cmd), \
         __VA_ARGS__                                   \
      };                                               \
      _brw_cmd_pack(cmd)(brw, _dw, &template);         \
      _dw + 1; /* Array starts at dw[1] */             \
   })

#define brw_state_emit(brw, cmd, align, offset, name)              \
   for (struct cmd name = { 0, },                                  \
        *_dst = brw_state_batch(brw, _brw_cmd_length(cmd) * 4,     \
                                align, offset);                    \
        __builtin_expect(_dst != NULL, 1);                         \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),              \
        _dst = NULL)
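/* How brw_batch_emit() works: the for(;;) trick declares a template struct
 * on the stack, reserves _brw_cmd_length(cmd) dwords in the batch, runs the
 * user's block exactly once to fill in the template, and then _pack()s the
 * template into the reserved space from the loop's increment expression.
 * A sketch mirroring the real call sites below:
 *
 *    brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
 *       rect.ClippedDrawingRectangleXMax = width - 1;
 *       rect.ClippedDrawingRectangleYMax = height - 1;
 *    }
 */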
/**
 * Polygon stipple packet
 */
static void
genX(upload_polygon_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
      /* Polygon stipple is provided in OpenGL order, i.e. bottom
       * row first.  If we're rendering to a window (i.e. the
       * default frame buffer object, 0), then we need to invert
       * it to match our pixel layout.  But if we're rendering
       * to a FBO (i.e. any named frame buffer object), we *don't*
       * need to invert - we already match the layout.
       */
      if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[31 - i]; /* invert */
      } else {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[i];
      }
   }
}

static const struct brw_tracked_state genX(polygon_stipple) = {
   .dirty = {
      .mesa = _NEW_POLYGON |
              _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple),
};
/**
 * Polygon stipple offset packet
 */
static void
genX(upload_polygon_stipple_offset)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_OFFSET), poly) {
      /* _NEW_BUFFERS
       *
       * If we're drawing to a system window we have to invert the Y axis
       * in order to match the OpenGL pixel coordinate system, and our
       * offset must be matched to the window position.  If we're drawing
       * to a user-created FBO then our native pixel coordinate system
       * works just fine, and there's no window system to worry about.
       */
      if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
         poly.PolygonStippleYOffset =
            (32 - (_mesa_geometric_height(ctx->DrawBuffer) & 31)) & 31;
      }
   }
}

static const struct brw_tracked_state genX(polygon_stipple_offset) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_POLYGON,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple_offset),
};
/**
 * Line stipple packet
 */
static void
genX(upload_line_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_LINE_STIPPLE), line) {
      line.LineStipplePattern = ctx->Line.StipplePattern;

      line.LineStippleInverseRepeatCount = 1.0f / ctx->Line.StippleFactor;
      line.LineStippleRepeatCount = ctx->Line.StippleFactor;
   }
}

static const struct brw_tracked_state genX(line_stipple) = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_line_stipple),
};
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void
genX(upload_drawing_rect)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   const unsigned int fb_width = _mesa_geometric_width(fb);
   const unsigned int fb_height = _mesa_geometric_height(fb);

   brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = fb_width - 1;
      rect.ClippedDrawingRectangleYMax = fb_height - 1;
   }
}

static const struct brw_tracked_state genX(drawing_rect) = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_drawing_rect),
};
static uint32_t *
genX(emit_vertex_buffer_state)(struct brw_context *brw,
                               uint32_t *dw,
                               unsigned buffer_nr,
                               struct brw_bo *bo,
                               unsigned start_offset,
                               unsigned end_offset,
                               unsigned stride,
                               unsigned step_rate)
{
   struct GENX(VERTEX_BUFFER_STATE) buf_state = {
      .VertexBufferIndex = buffer_nr,
      .BufferPitch = stride,
      .BufferStartingAddress = vertex_bo(bo, start_offset),
#if GEN_GEN >= 8
      .BufferSize = end_offset - start_offset,
#endif

#if GEN_GEN >= 7
      .AddressModifyEnable = true,
#endif

#if GEN_GEN < 8
      .BufferAccessType = step_rate ? INSTANCEDATA : VERTEXDATA,
      .InstanceDataStepRate = step_rate,
#if GEN_GEN >= 5
      .EndAddress = vertex_bo(bo, end_offset - 1),
#endif
#endif

#if GEN_GEN == 10
      .VertexBufferMOCS = CNL_MOCS_WB,
#elif GEN_GEN == 9
      .VertexBufferMOCS = SKL_MOCS_WB,
#elif GEN_GEN == 8
      .VertexBufferMOCS = BDW_MOCS_WB,
#elif GEN_GEN == 7
      .VertexBufferMOCS = GEN7_MOCS_L3,
#endif
   };

   GENX(VERTEX_BUFFER_STATE_pack)(brw, dw, &buf_state);
   return dw + GENX(VERTEX_BUFFER_STATE_length);
}
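/* Note the two sizing schemes above: Gen8+ takes an explicit BufferSize
 * (end_offset - start_offset), while earlier gens instead program an
 * inclusive EndAddress (end_offset - 1).  Callers below always pass an
 * exclusive end_offset and let this helper pick the right encoding.
 */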
static bool
is_passthru_format(uint32_t format)
{
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return true;
   default:
      return false;
   }
}

static int
uploads_needed(uint32_t format)
{
   if (!is_passthru_format(format))
      return 1;

   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
      return 1;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return 2;
   default:
      unreachable("not reached");
   }
}

/*
 * Returns the format that we are finally going to use when uploading a
 * vertex element.  It will only change if we are using *64*PASSTHRU formats,
 * as for gen < 8 they need to be split into two *32*FLOAT formats.
 *
 * @upload indicates which upload we are in.  Valid values are [0,1].
 */
static uint32_t
downsize_format_if_needed(uint32_t format,
                          int upload)
{
   assert(upload == 0 || upload == 1);

   if (!is_passthru_format(format))
      return format;

   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
      return ISL_FORMAT_R32G32_FLOAT;
   case ISL_FORMAT_R64G64_PASSTHRU:
      return ISL_FORMAT_R32G32B32A32_FLOAT;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
      return !upload ? ISL_FORMAT_R32G32B32A32_FLOAT
                     : ISL_FORMAT_R32G32_FLOAT;
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return ISL_FORMAT_R32G32B32A32_FLOAT;
   default:
      unreachable("not reached");
   }
}

/*
 * Returns the number of components associated with a format that is used on
 * a 64-to-32 format split.  See downsize_format_if_needed().
 */
static int
upload_format_size(uint32_t upload_format)
{
   switch (upload_format) {
   case ISL_FORMAT_R32G32_FLOAT:
      return 2;
   case ISL_FORMAT_R32G32B32A32_FLOAT:
      return 4;
   default:
      unreachable("not reached");
   }
}
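/* Worked example of the 64-bit splitting above for gen < 8: a dvec3
 * attribute has format R64G64B64_PASSTHRU, so uploads_needed() returns 2.
 * Upload 0 becomes R32G32B32A32_FLOAT at the attribute's offset (the first
 * 16 bytes, i.e. x and y), and upload 1 becomes R32G32_FLOAT at offset + 16
 * (the remaining 8 bytes, i.e. z).
 */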
static void
genX(emit_vertices)(struct brw_context *brw)
{
   uint32_t *dw;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

#if GEN_GEN < 6
   brw_emit_query_begin(brw);
#endif

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

#if GEN_GEN >= 8
   struct gl_context *ctx = &brw->ctx;
   const bool uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
                                ctx->Polygon.BackMode != GL_FILL);

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      unsigned vue = brw->vb.nr_enabled;

      /* The element for the edge flags must always be last, so we have to
       * insert the SGVS before it in that case.
       */
      if (uses_edge_flag) {
         assert(vue > 0);
         vue--;
      }

      WARN_ONCE(vue >= 33,
                "Trying to insert VID/IID past 33rd vertex element, "
                "need to reorder the vertex attributes.");

      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs) {
         if (vs_prog_data->uses_vertexid) {
            vfs.VertexIDEnable = true;
            vfs.VertexIDComponentNumber = 2;
            vfs.VertexIDElementOffset = vue;
         }

         if (vs_prog_data->uses_instanceid) {
            vfs.InstanceIDEnable = true;
            vfs.InstanceIDComponentNumber = 3;
            vfs.InstanceIDElementOffset = vue;
         }
      }

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = true;
         vfi.VertexElementIndex = vue;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs);
   }
   /* Normally we don't need an element for the SGVS attribute because the
    * 3DSTATE_VF_SGVS instruction lets you store the generated attribute in an
    * element that is past the list in 3DSTATE_VERTEX_ELEMENTS.  However if
    * we're using draw parameters then we need an element for those values.
    * Additionally if there is an edge flag element then the SGVS can't be
    * inserted past that so we need a dummy element to ensure that the edge
    * flag is the last one.
    */
   const bool needs_sgvs_element = (vs_prog_data->uses_basevertex ||
                                    vs_prog_data->uses_baseinstance ||
                                    ((vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid)
                                     && uses_edge_flag));
#else
   const bool needs_sgvs_element = (vs_prog_data->uses_basevertex ||
                                    vs_prog_data->uses_baseinstance ||
                                    vs_prog_data->uses_instanceid ||
                                    vs_prog_data->uses_vertexid);
#endif

   unsigned nr_elements =
      brw->vb.nr_enabled + needs_sgvs_element + vs_prog_data->uses_drawid;

#if GEN_GEN < 8
   /* If any of the formats of vb.enabled needs more than one upload, we
    * need to add it to nr_elements.
    */
   for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);

      if (uploads_needed(format) > 1)
         nr_elements++;
   }
#endif
   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (nr_elements == 0) {
      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                           1 + GENX(VERTEX_ELEMENT_STATE_length));
      struct GENX(VERTEX_ELEMENT_STATE) elem = {
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_1_FP,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem);
      return;
   }
583 const bool uses_draw_params
=
584 vs_prog_data
->uses_basevertex
||
585 vs_prog_data
->uses_baseinstance
;
586 const unsigned nr_buffers
= brw
->vb
.nr_buffers
+
587 uses_draw_params
+ vs_prog_data
->uses_drawid
;
590 assert(nr_buffers
<= (GEN_GEN
>= 6 ? 33 : 17));
592 dw
= brw_batch_emitn(brw
, GENX(3DSTATE_VERTEX_BUFFERS
),
593 1 + GENX(VERTEX_BUFFER_STATE_length
) * nr_buffers
);
595 for (unsigned i
= 0; i
< brw
->vb
.nr_buffers
; i
++) {
596 const struct brw_vertex_buffer
*buffer
= &brw
->vb
.buffers
[i
];
597 /* Prior to Haswell and Bay Trail we have to use 4-component formats
598 * to fake 3-component ones. In particular, we do this for
599 * half-float and 8 and 16-bit integer formats. This means that the
600 * vertex element may poke over the end of the buffer by 2 bytes.
602 const unsigned padding
=
603 (GEN_GEN
<= 7 && !brw
->is_baytrail
&& !brw
->is_haswell
) * 2;
604 const unsigned end
= buffer
->offset
+ buffer
->size
+ padding
;
605 dw
= genX(emit_vertex_buffer_state
)(brw
, dw
, i
, buffer
->bo
,
612 if (uses_draw_params
) {
613 dw
= genX(emit_vertex_buffer_state
)(brw
, dw
, brw
->vb
.nr_buffers
,
614 brw
->draw
.draw_params_bo
,
615 brw
->draw
.draw_params_offset
,
616 brw
->draw
.draw_params_bo
->size
,
621 if (vs_prog_data
->uses_drawid
) {
622 dw
= genX(emit_vertex_buffer_state
)(brw
, dw
, brw
->vb
.nr_buffers
+ 1,
623 brw
->draw
.draw_id_bo
,
624 brw
->draw
.draw_id_offset
,
625 brw
->draw
.draw_id_bo
->size
,
   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
#if GEN_GEN >= 6
   assert(nr_elements <= 34);
   const struct brw_vertex_element *gen6_edgeflag_input = NULL;
#else
   assert(nr_elements <= 18);
#endif

   dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                        1 + GENX(VERTEX_ELEMENT_STATE_length) * nr_elements);
   unsigned i;
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = VFCOMP_STORE_SRC;
      uint32_t comp1 = VFCOMP_STORE_SRC;
      uint32_t comp2 = VFCOMP_STORE_SRC;
      uint32_t comp3 = VFCOMP_STORE_SRC;
      const unsigned num_uploads = GEN_GEN < 8 ? uploads_needed(format) : 1;

#if GEN_GEN >= 8
      /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
       * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
       * element which has edge flag enabled."
       */
      assert(!(is_passthru_format(format) && uses_edge_flag));
#endif

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper.  Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte.  Just rewrite that to convert to a float.
       *
       * Gen6+ passes edgeflag as sideband along with the vertex, instead
       * of in the VUE.  We have to upload it sideband as the last vertex
       * element according to the B-Spec.
       */
#if GEN_GEN >= 6
      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         gen6_edgeflag_input = input;
         continue;
      }
#endif

      for (unsigned c = 0; c < num_uploads; c++) {
         const uint32_t upload_format = GEN_GEN >= 8 ? format :
            downsize_format_if_needed(format, c);
         /* If we need more than one upload, the offset stride is 128 bits
          * (16 bytes), as previous uploads consume a full entry.
          */
         const unsigned offset = input->offset + c * 16;

         const int size = (GEN_GEN < 8 && is_passthru_format(format)) ?
            upload_format_size(upload_format) : input->glarray->Size;

         /* Intentional case fall-through: missing trailing components are
          * padded with zeros, and the w component gets the type-appropriate
          * "one" (or zero for doubles on Gen8+).
          */
         switch (size) {
         case 0: comp0 = VFCOMP_STORE_0;
         case 1: comp1 = VFCOMP_STORE_0;
         case 2: comp2 = VFCOMP_STORE_0;
         case 3:
            if (GEN_GEN >= 8 && input->glarray->Doubles) {
               comp3 = VFCOMP_STORE_0;
            } else if (input->glarray->Integer) {
               comp3 = VFCOMP_STORE_1_INT;
            } else {
               comp3 = VFCOMP_STORE_1_FP;
            }

            break;
         }

#if GEN_GEN >= 8
         /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
          *
          *     "When SourceElementFormat is set to one of the *64*_PASSTHRU
          *     formats, 64-bit components are stored in the URB without any
          *     conversion.  In this case, vertex elements must be written as
          *     128 or 256 bits, with VFCOMP_STORE_0 being used to pad the
          *     output as required.  E.g., if R64_PASSTHRU is used to copy a
          *     64-bit Red component into the URB, Component 1 must be
          *     specified as VFCOMP_STORE_0 (with Components 2,3 set to
          *     VFCOMP_NOSTORE) in order to output a 128-bit vertex element,
          *     or Components 1-3 must be specified as VFCOMP_STORE_0 in
          *     order to output a 256-bit vertex element.  Likewise, use of
          *     R64G64B64_PASSTHRU requires Component 3 to be specified as
          *     VFCOMP_STORE_0 in order to output a 256-bit vertex element."
          */
         if (input->glarray->Doubles && !input->is_dual_slot) {
            /* Store vertex elements which correspond to double and dvec2
             * vertex shader inputs as 128-bit vertex elements, instead of
             * 256-bits.
             */
            comp2 = VFCOMP_NOSTORE;
            comp3 = VFCOMP_NOSTORE;
         }
#endif

         struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
            .VertexBufferIndex = input->buffer,
            .Valid = true,
            .SourceElementFormat = upload_format,
            .SourceElementOffset = offset,
            .Component0Control = comp0,
            .Component1Control = comp1,
            .Component2Control = comp2,
            .Component3Control = comp3,
#if GEN_GEN < 5
            .DestinationElementOffset = i * 4,
#endif
         };

         GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
         dw += GENX(VERTEX_ELEMENT_STATE_length);
      }
   }
   if (needs_sgvs_element) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

#if GEN_GEN >= 8
      if (vs_prog_data->uses_basevertex ||
          vs_prog_data->uses_baseinstance) {
         elem_state.VertexBufferIndex = brw->vb.nr_buffers;
         elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }
#else
      elem_state.VertexBufferIndex = brw->vb.nr_buffers;
      elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
      if (vs_prog_data->uses_basevertex)
         elem_state.Component0Control = VFCOMP_STORE_SRC;

      if (vs_prog_data->uses_baseinstance)
         elem_state.Component1Control = VFCOMP_STORE_SRC;

      if (vs_prog_data->uses_vertexid)
         elem_state.Component2Control = VFCOMP_STORE_VID;

      if (vs_prog_data->uses_instanceid)
         elem_state.Component3Control = VFCOMP_STORE_IID;
#endif

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
   if (vs_prog_data->uses_drawid) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = brw->vb.nr_buffers + 1,
         .SourceElementFormat = ISL_FORMAT_R32_UINT,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#if GEN_GEN >= 6
   if (gen6_edgeflag_input) {
      const uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = gen6_edgeflag_input->buffer,
         .EdgeFlagEnable = true,
         .SourceElementFormat = format,
         .SourceElementOffset = gen6_edgeflag_input->offset,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#endif
#if GEN_GEN >= 8
   for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
      unsigned element_index;

      /* The edge flag element is reordered to be the last one in the code
       * above so we need to compensate for that in the element indices used
       * below.
       */
      if (input == gen6_edgeflag_input)
         element_index = nr_elements - 1;
      else
         element_index = j++;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element_index;
         vfi.InstancingEnable = buffer->step_rate != 0;
         vfi.InstanceDataStepRate = buffer->step_rate;
      }
   }

   if (vs_prog_data->uses_drawid) {
      const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element;
      }
   }
#endif
}
static const struct brw_tracked_state genX(vertices) = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(emit_vertices),
};
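/* A brw_tracked_state atom like the one above is the driver's dirty-bit
 * mechanism: .dirty lists the Mesa (_NEW_*) and driver (BRW_NEW_*) flags
 * that invalidate this state, and the core state-upload loop calls .emit
 * whenever any of them is set.  The same pattern repeats for every atom in
 * this file.
 */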
static void
genX(emit_index_buffer)(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if GEN_GEN < 8 && !GEN_IS_HASWELL
      ib.CutIndexEnable = brw->prim_restart.enable_cut_index;
#endif
      ib.IndexFormat = brw_get_index_type(index_buffer->index_size);
      ib.BufferStartingAddress = vertex_bo(brw->ib.bo, 0);
#if GEN_GEN >= 8
      ib.IndexBufferMOCS = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      ib.BufferSize = brw->ib.size;
#else
      ib.BufferEndingAddress = vertex_bo(brw->ib.bo, brw->ib.size - 1);
#endif
   }
}
static const struct brw_tracked_state genX(index_buffer) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(emit_index_buffer),
};
#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
genX(upload_cut_index)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_VF), vf) {
      if (ctx->Array._PrimitiveRestart && brw->ib.ib) {
         vf.IndexedDrawCutIndexEnable = true;
         vf.CutIndex = _mesa_primitive_restart_index(ctx, brw->ib.index_size);
      }
   }
}

const struct brw_tracked_state genX(cut_index) = {
   .dirty = {
      .mesa = _NEW_TRANSFORM,
      .brw = BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(upload_cut_index),
};
#endif
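/* Example of what the packet above enables: with
 * GL_PRIMITIVE_RESTART_FIXED_INDEX and a 16-bit index buffer,
 * _mesa_primitive_restart_index() yields 0xffff, so any 0xffff index ends
 * the current strip/fan instead of fetching a vertex.
 */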
/**
 * Determine the appropriate attribute override value to store into the
 * 3DSTATE_SF structure for a given fragment shader attribute.  The attribute
 * override value contains two pieces of information: the location of the
 * attribute in the VUE (relative to urb_entry_read_offset, see below), and a
 * flag indicating whether to "swizzle" the attribute based on the direction
 * the triangle is facing.
 *
 * If an attribute is "swizzled", then the given VUE location is used for
 * front-facing triangles, and the VUE location that immediately follows is
 * used for back-facing triangles.  We use this to implement the mapping from
 * gl_FrontColor/gl_BackColor to gl_Color.
 *
 * urb_entry_read_offset is the offset into the VUE at which the SF unit is
 * being instructed to begin reading attribute data.  It can be set to a
 * nonzero value to prevent the SF unit from wasting time reading elements of
 * the VUE that are not needed by the fragment shader.  It is measured in
 * 256-bit increments.
 */
static void
genX(get_attr_override)(struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr,
                        const struct brw_vue_map *vue_map,
                        int urb_entry_read_offset, int fs_attr,
                        bool two_side_color, uint32_t *max_source_attr)
{
   /* Find the VUE slot for this attribute. */
   int slot = vue_map->varying_to_slot[fs_attr];

   /* Viewport and Layer are stored in the VUE header.  We need to override
    * them to zero if earlier stages didn't write them, as GL requires that
    * they read back as zero when not explicitly set.
    */
   if (fs_attr == VARYING_SLOT_VIEWPORT || fs_attr == VARYING_SLOT_LAYER) {
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideW = true;
      attr->ConstantSource = CONST_0000;

      if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
         attr->ComponentOverrideY = true;
      if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
         attr->ComponentOverrideZ = true;

      return;
   }

   /* If there was only a back color written but not front, use back
    * as the color instead of undefined.
    */
   if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
   if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

   if (slot == -1) {
      /* This attribute does not exist in the VUE--that means that the vertex
       * shader did not write to it.  This means that either:
       *
       * (a) This attribute is a texture coordinate, and it is going to be
       * replaced with point coordinates (as a consequence of a call to
       * glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)), so the
       * hardware will ignore whatever attribute override we supply.
       *
       * (b) This attribute is read by the fragment shader but not written by
       * the vertex shader, so its value is undefined.  Therefore the
       * attribute override we supply doesn't matter.
       *
       * (c) This attribute is gl_PrimitiveID, and it wasn't written by the
       * previous shader stage.
       *
       * Note that we don't have to worry about the cases where the attribute
       * is gl_PointCoord or is undergoing point sprite coordinate
       * replacement, because in those cases, this function isn't called.
       *
       * In case (c), we need to program the attribute overrides so that the
       * primitive ID will be stored in this slot.  In every other case, the
       * attribute override we supply doesn't matter.  So just go ahead and
       * program primitive ID in every case.
       */
      attr->ComponentOverrideW = true;
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideY = true;
      attr->ComponentOverrideZ = true;
      attr->ConstantSource = PRIM_ID;
      return;
   }

   /* Compute the location of the attribute relative to urb_entry_read_offset.
    * Each increment of urb_entry_read_offset represents a 256-bit value, so
    * it counts for two 128-bit VUE slots.
    */
   int source_attr = slot - 2 * urb_entry_read_offset;
   assert(source_attr >= 0 && source_attr < 32);

   /* If we are doing two-sided color, and the VUE slot following this one
    * represents a back-facing color, then we need to instruct the SF unit to
    * do back-facing swizzling.
    */
   bool swizzling = two_side_color &&
      ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
       (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1));

   /* Update max_source_attr.  If swizzling, the SF will read this slot + 1. */
   if (*max_source_attr < source_attr + swizzling)
      *max_source_attr = source_attr + swizzling;

   attr->SourceAttribute = source_attr;
   if (swizzling)
      attr->SwizzleSelect = INPUTATTR_FACING;
}
static void
genX(calculate_attr_overrides)(const struct brw_context *brw,
                               struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr_overrides,
                               uint32_t *point_sprite_enables,
                               uint32_t *urb_entry_read_length,
                               uint32_t *urb_entry_read_offset)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   uint32_t max_source_attr = 0;

   *point_sprite_enables = 0;

   /* BRW_NEW_FRAGMENT_PROGRAM
    *
    * If the fragment shader reads VARYING_SLOT_LAYER, then we need to pass in
    * the full vertex header.  Otherwise, we can program the SF to start
    * reading at an offset of 1 (2 varying slots) to skip unnecessary data:
    * - VARYING_SLOT_PSIZ and BRW_VARYING_SLOT_NDC on gen4-5
    * - VARYING_SLOT_{PSIZ,LAYER} and VARYING_SLOT_POS on gen6+
    */

   bool fs_needs_vue_header = brw->fragment_program->info.inputs_read &
      (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT);

   *urb_entry_read_offset = fs_needs_vue_header ? 0 : 1;

   /* From the Ivybridge PRM, Vol 2 Part 1, 3DSTATE_SBE,
    * description of dw10 Point Sprite Texture Coordinate Enable:
    *
    * "This field must be programmed to zero when non-point primitives
    * are rendered."
    *
    * The SandyBridge PRM doesn't explicitly say that point sprite enables
    * must be programmed to zero when rendering non-point primitives, but
    * the IvyBridge PRM does, and if we don't, we get garbage.
    *
    * This is not required on Haswell, as the hardware ignores this state
    * when drawing non-points -- although we do still need to be careful to
    * correctly set the attr overrides.
    *
    * _NEW_POLYGON
    * BRW_NEW_PRIMITIVE | BRW_NEW_GS_PROG_DATA | BRW_NEW_TES_PROG_DATA
    */
   bool drawing_points = brw_is_drawing_points(brw);

   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      if (input_index < 0)
         continue;

      /* _NEW_POINT */
      bool point_sprite = false;
      if (drawing_points) {
         if (point->PointSprite &&
             (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7) &&
             (point->CoordReplace & (1u << (attr - VARYING_SLOT_TEX0)))) {
            point_sprite = true;
         }

         if (attr == VARYING_SLOT_PNTC)
            point_sprite = true;

         if (point_sprite)
            *point_sprite_enables |= (1 << input_index);
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | _NEW_LIGHT | _NEW_PROGRAM */
      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attribute = { 0 };

      if (!point_sprite) {
         genX(get_attr_override)(&attribute,
                                 &brw->vue_map_geom_out,
                                 *urb_entry_read_offset, attr,
                                 _mesa_vertex_program_two_side_enabled(ctx),
                                 &max_source_attr);
      }

      /* The hardware can only do the overrides on 16 overrides at a
       * time, and the other up to 16 have to be lined up so that the
       * input index = the output index.  We'll need to do some
       * tweaking to make sure that's the case.
       */
      if (input_index < 16)
         attr_overrides[input_index] = attribute;
      else
         assert(attribute.SourceAttribute == input_index);
   }

   /* From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    * "This field should be set to the minimum length required to read the
    * maximum source attribute.  The maximum source attribute is indicated
    * by the maximum value of the enabled Attribute # Source Attribute if
    * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    * enable is not set.
    * read_length = ceiling((max_source_attr + 1) / 2)
    *
    * [errata] Corruption/Hang possible if length programmed larger than
    * recommended"
    *
    * Similar text exists for Ivy Bridge.
    */
   *urb_entry_read_length = DIV_ROUND_UP(max_source_attr + 1, 2);
}
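/* Worked example of the formula above: if the highest enabled source
 * attribute is 5 and it is a front color with back-face swizzling, the SF
 * also reads slot 6, so max_source_attr = 6 and the read length is
 * DIV_ROUND_UP(6 + 1, 2) = 4 pairs of 128-bit VUE slots (256-bit units).
 */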
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 6
static void
genX(upload_depth_stencil_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* _NEW_DEPTH */
   struct gl_depthbuffer_attrib *depth = &ctx->Depth;

   /* _NEW_STENCIL */
   struct gl_stencil_attrib *stencil = &ctx->Stencil;
   const int b = stencil->_BackFace;

#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_WM_DEPTH_STENCIL), wmds) {
#else
   uint32_t ds_offset;
   brw_state_emit(brw, GENX(DEPTH_STENCIL_STATE), 64, &ds_offset, wmds) {
#endif
      if (depth->Test && depth_irb) {
         wmds.DepthTestEnable = true;
         wmds.DepthBufferWriteEnable = brw_depth_writes_enabled(brw);
         wmds.DepthTestFunction = intel_translate_compare_func(depth->Func);
      }

      if (brw->stencil_enabled) {
         wmds.StencilTestEnable = true;
         wmds.StencilWriteMask = stencil->WriteMask[0] & 0xff;
         wmds.StencilTestMask = stencil->ValueMask[0] & 0xff;

         wmds.StencilTestFunction =
            intel_translate_compare_func(stencil->Function[0]);
         wmds.StencilFailOp =
            intel_translate_stencil_op(stencil->FailFunc[0]);
         wmds.StencilPassDepthPassOp =
            intel_translate_stencil_op(stencil->ZPassFunc[0]);
         wmds.StencilPassDepthFailOp =
            intel_translate_stencil_op(stencil->ZFailFunc[0]);

         wmds.StencilBufferWriteEnable = brw->stencil_write_enabled;

         if (brw->stencil_two_sided) {
            wmds.DoubleSidedStencilEnable = true;
            wmds.BackfaceStencilWriteMask = stencil->WriteMask[b] & 0xff;
            wmds.BackfaceStencilTestMask = stencil->ValueMask[b] & 0xff;

            wmds.BackfaceStencilTestFunction =
               intel_translate_compare_func(stencil->Function[b]);
            wmds.BackfaceStencilFailOp =
               intel_translate_stencil_op(stencil->FailFunc[b]);
            wmds.BackfaceStencilPassDepthPassOp =
               intel_translate_stencil_op(stencil->ZPassFunc[b]);
            wmds.BackfaceStencilPassDepthFailOp =
               intel_translate_stencil_op(stencil->ZFailFunc[b]);
         }

#if GEN_GEN >= 9
         wmds.StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
         wmds.BackfaceStencilReferenceValue = _mesa_get_stencil_ref(ctx, b);
#endif
      }
   }

#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
      ptr.DEPTH_STENCIL_STATEChange = true;
   }
#elif GEN_GEN == 7
   brw_batch_emit(brw, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
   }
#endif
}

static const struct brw_tracked_state genX(depth_stencil_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_DEPTH |
              _NEW_STENCIL,
      .brw = BRW_NEW_BLORP |
             (GEN_GEN >= 8 ? BRW_NEW_CONTEXT
                           : BRW_NEW_BATCH |
                             BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_depth_stencil_state),
};
#endif

/* ---------------------------------------------------------------------- */
#if GEN_GEN >= 6
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_CLIP), clip) {
      clip.StatisticsEnable = !brw->meta_in_progress;

      if (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
         clip.NonPerspectiveBarycentricEnable = true;

#if GEN_GEN >= 7
      clip.EarlyCullEnable = true;
#endif

#if GEN_GEN == 7
      clip.FrontWinding = brw->polygon_front_bit == _mesa_is_user_fbo(fb);

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            clip.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            clip.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            clip.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("Should not get here: invalid CullFlag");
         }
      } else {
         clip.CullMode = CULLMODE_NONE;
      }
#endif

#if GEN_GEN < 8
      clip.UserClipDistanceCullTestEnableBitmask =
         brw_vue_prog_data(brw->vs.base.prog_data)->cull_distance_mask;

      clip.ViewportZClipTestEnable = !ctx->Transform.DepthClamp;
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex == GL_FIRST_VERTEX_CONVENTION) {
         clip.TriangleStripListProvokingVertexSelect = 0;
         clip.TriangleFanProvokingVertexSelect = 1;
         clip.LineStripListProvokingVertexSelect = 0;
      } else {
         clip.TriangleStripListProvokingVertexSelect = 2;
         clip.TriangleFanProvokingVertexSelect = 2;
         clip.LineStripListProvokingVertexSelect = 1;
      }

      /* _NEW_TRANSFORM */
      clip.UserClipDistanceClipTestEnableBitmask =
         ctx->Transform.ClipPlanesEnabled;

#if GEN_GEN >= 8
      clip.ForceUserClipDistanceClipTestEnableBitmask = true;
#endif

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      /* BRW_NEW_VIEWPORT_COUNT */
      const unsigned viewport_count = brw->clip.viewport_count;

      if (ctx->RasterDiscard) {
         clip.ClipMode = CLIPMODE_REJECT_ALL;
#if GEN_GEN == 6
         perf_debug("Rasterizer discard is currently implemented via the "
                    "clipper; having the GS not write primitives would "
                    "likely be faster.\n");
#endif
      } else {
         clip.ClipMode = CLIPMODE_NORMAL;
      }

      clip.ClipEnable = true;

      /* _NEW_POLYGON,
       * BRW_NEW_GEOMETRY_PROGRAM | BRW_NEW_TES_PROG_DATA | BRW_NEW_PRIMITIVE
       */
      if (!brw_is_drawing_points(brw) && !brw_is_drawing_lines(brw))
         clip.ViewportXYClipTestEnable = true;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;
      clip.MaximumVPIndex = viewport_count - 1;
      if (_mesa_geometric_layers(fb) == 0)
         clip.ForceZeroRTAIndexEnable = true;
   }
}

static const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_LIGHT |
              _NEW_POLYGON |
              _NEW_TRANSFORM,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_VS_PROG_DATA |
             BRW_NEW_META_IN_PROGRESS |
             BRW_NEW_PRIMITIVE |
             BRW_NEW_RASTERIZER_DISCARD |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_clip_state),
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_sf)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float point_size;

#if GEN_GEN <= 7
   /* _NEW_BUFFERS */
   bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   UNUSED const bool multisampled_fbo =
      _mesa_geometric_samples(ctx->DrawBuffer) > 1;
#endif

#if GEN_GEN < 6
   const struct brw_sf_prog_data *sf_prog_data = brw->sf.prog_data;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;

   brw_state_emit(brw, GENX(SF_STATE), 64, &brw->sf.state_offset, sf) {
      sf.KernelStartPointer = KSP_ro(brw, brw->sf.prog_offset);
      sf.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      sf.GRFRegisterCount = DIV_ROUND_UP(sf_prog_data->total_grf, 16) - 1;
      sf.DispatchGRFStartRegisterForURBData = 3;
      sf.VertexURBEntryReadOffset = BRW_SF_URB_ENTRY_READ_OFFSET;
      sf.VertexURBEntryReadLength = sf_prog_data->urb_read_length;
      sf.NumberofURBEntries = brw->urb.nr_sf_entries;
      sf.URBEntryAllocationSize = brw->urb.sfsize - 1;

      /* STATE_PREFETCH command description describes this state as being
       * something loaded through the GPE (L2 ISC), so it's INSTRUCTION
       * domain.
       */
      sf.SetupViewportStateOffset =
         instruction_ro_bo(brw->batch.bo, brw->sf.vp_offset);

      sf.PointRasterizationRule = RASTRULE_UPPER_RIGHT;

      /* sf.ConstantURBEntryReadLength = stage_prog_data->curb_read_length; */
      /* sf.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2; */

      sf.MaximumNumberofThreads =
         MIN2(GEN_GEN == 5 ? 48 : 24, brw->urb.nr_sf_entries) - 1;

      sf.SpritePointEnable = ctx->Point.PointSprite;

      sf.DestinationOriginHorizontalBias = 0.5;
      sf.DestinationOriginVerticalBias = 0.5;
#else
   brw_batch_emit(brw, GENX(3DSTATE_SF), sf) {
      sf.StatisticsEnable = true;
#endif
      sf.ViewportTransformEnable = true;

#if GEN_GEN == 7
      /* _NEW_BUFFERS */
      sf.DepthBufferSurfaceFormat = brw_depthbuffer_format(brw);
#endif

#if GEN_GEN <= 7
      /* _NEW_POLYGON */
      sf.FrontWinding = brw->polygon_front_bit == render_to_fbo;

      sf.GlobalDepthOffsetEnableSolid = ctx->Polygon.OffsetFill;
      sf.GlobalDepthOffsetEnableWireframe = ctx->Polygon.OffsetLine;
      sf.GlobalDepthOffsetEnablePoint = ctx->Polygon.OffsetPoint;

      switch (ctx->Polygon.FrontMode) {
      case GL_FILL:
         sf.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (ctx->Polygon.BackMode) {
      case GL_FILL:
         sf.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      if (multisampled_fbo && ctx->Multisample.Enabled)
         sf.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;

      sf.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;
      sf.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      sf.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;

      /* _NEW_SCISSOR */
      sf.ScissorRectangleEnable = true;

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            sf.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            sf.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            sf.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         sf.CullMode = CULLMODE_NONE;
      }

#if GEN_IS_HASWELL
      sf.LineStippleEnable = ctx->Line.StippleFlag;
#endif
#endif

      /* _NEW_LINE */
#if GEN_GEN == 8
      if (brw->is_cherryview)
         sf.CHVLineWidth = brw_get_line_width(brw);
      else
         sf.LineWidth = brw_get_line_width(brw);
#else
      sf.LineWidth = brw_get_line_width(brw);
#endif

      if (ctx->Line.SmoothFlag) {
         sf.LineEndCapAntialiasingRegionWidth = _10pixels;
#if GEN_GEN <= 7
         sf.AntiAliasingEnable = true;
#endif
      }

      /* _NEW_POINT - Clamp to ARB_point_parameters user limits */
      point_size = CLAMP(ctx->Point.Size, ctx->Point.MinSize, ctx->Point.MaxSize);
      /* Clamp to the hardware limits */
      sf.PointWidth = CLAMP(point_size, 0.125f, 255.875f);

      /* _NEW_PROGRAM | _NEW_POINT, BRW_NEW_VUE_MAP_GEOM_OUT */
      if (use_state_point_size(brw))
         sf.PointWidthSource = State;

#if GEN_GEN >= 8
      /* _NEW_POINT | _NEW_MULTISAMPLE */
      if ((ctx->Point.SmoothFlag || _mesa_is_multisample_enabled(ctx)) &&
          !ctx->Point.PointSprite)
         sf.SmoothPointEnable = true;
#endif

#if GEN_IS_G4X || GEN_GEN >= 5
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION) {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      } else {
         sf.TriangleFanProvokingVertexSelect = 1;
      }

#if GEN_GEN == 6
      /* BRW_NEW_FS_PROG_DATA */
      const struct brw_wm_prog_data *wm_prog_data =
         brw_wm_prog_data(brw->wm.base.prog_data);

      sf.AttributeSwizzleEnable = true;
      sf.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /*
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted, too.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) != render_to_fbo) {
         sf.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      } else {
         sf.PointSpriteTextureCoordinateOrigin = UPPERLEFT;
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | BRW_NEW_FRAGMENT_PROGRAM |
       * _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM | BRW_NEW_FS_PROG_DATA
       */
      uint32_t urb_entry_read_length;
      uint32_t urb_entry_read_offset;
      uint32_t point_sprite_enables;
      genX(calculate_attr_overrides)(brw, sf.Attribute, &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);
      sf.VertexURBEntryReadLength = urb_entry_read_length;
      sf.VertexURBEntryReadOffset = urb_entry_read_offset;
      sf.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sf.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
#endif
   }
}
static const struct brw_tracked_state genX(sf_state) = {
   .dirty = {
      .mesa = _NEW_LIGHT |
              _NEW_LINE |
              _NEW_POINT |
              _NEW_PROGRAM |
              (GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0) |
              (GEN_GEN <= 7 ? _NEW_BUFFERS | _NEW_POLYGON : 0),
      .brw = BRW_NEW_BLORP |
             BRW_NEW_VUE_MAP_GEOM_OUT |
             (GEN_GEN <= 5 ? BRW_NEW_BATCH |
                             BRW_NEW_PROGRAM_CACHE |
                             BRW_NEW_SF_PROG_DATA |
                             BRW_NEW_SF_VP |
                             BRW_NEW_URB_FENCE
                           : 0) |
             (GEN_GEN >= 6 ? BRW_NEW_CONTEXT : 0) |
             (GEN_GEN >= 6 && GEN_GEN <= 7 ?
                             BRW_NEW_GS_PROG_DATA |
                             BRW_NEW_PRIMITIVE |
                             BRW_NEW_TES_PROG_DATA
                           : 0) |
             (GEN_GEN == 6 ? BRW_NEW_FS_PROG_DATA |
                             BRW_NEW_FRAGMENT_PROGRAM
                           : 0),
   },
   .emit = genX(upload_sf),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_wm)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   UNUSED bool writes_depth =
      wm_prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;

#if GEN_GEN < 7
   const struct brw_stage_state *stage_state = &brw->wm.base;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* We can't fold this into gen6_upload_wm_push_constants(), because
    * according to the SNB PRM, vol 2 part 1 section 7.2.2
    * (3DSTATE_CONSTANT_PS [DevSNB]):
    *
    *     "[DevSNB]: This packet must be followed by WM_STATE."
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_PS), wmcp) {
      if (wm_prog_data->base.nr_params != 0) {
         wmcp.Buffer0Valid = true;
         /* Pointer to the WM constant buffer.  Covered by the set of
          * state flags from gen6_upload_wm_push_constants.
          */
         wmcp.PointertoPSConstantBuffer0 = stage_state->push_const_offset;
         wmcp.PSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

   brw_batch_emit(brw, GENX(3DSTATE_WM), wm) {
      wm.StatisticsEnable = true;
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;

#if GEN_GEN < 7
      if (wm_prog_data->base.use_alt_mode)
         wm.FloatingPointMode = Alternate;

      wm.SamplerCount = DIV_ROUND_UP(stage_state->sampler_count, 4);
      wm.BindingTableEntryCount = wm_prog_data->base.binding_table.size_bytes / 4;
      wm.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
      wm._8PixelDispatchEnable = wm_prog_data->dispatch_8;
      wm._16PixelDispatchEnable = wm_prog_data->dispatch_16;
      wm.DispatchGRFStartRegisterForConstantSetupData0 =
         wm_prog_data->base.dispatch_grf_start_reg;
      wm.DispatchGRFStartRegisterForConstantSetupData2 =
         wm_prog_data->dispatch_grf_start_reg_2;
      wm.KernelStartPointer0 = stage_state->prog_offset;
      wm.KernelStartPointer2 = stage_state->prog_offset +
         wm_prog_data->prog_offset_2;
      wm.DualSourceBlendEnable =
         wm_prog_data->dual_src_blend && (ctx->Color.BlendEnabled & 1) &&
         ctx->Color.Blend[0]._UsesDualSrc;
      wm.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      wm.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* From the SNB PRM, volume 2 part 1, page 281:
       * "If the PS kernel does not need the Position XY Offsets
       * to compute a Position XY value, then this field should be
       * programmed to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       * to compute a Position XY value, this field should match Position
       * ZW Interpolation Mode to ensure a consistent position.xyzw
       * computation."
       *
       * We only require XY sample offsets.  So, this recommendation doesn't
       * look useful at the moment.  We might need this in future.
       */
      if (wm_prog_data->uses_pos_offset)
         wm.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         wm.PositionXYOffsetSelect = POSOFFSET_NONE;

      if (wm_prog_data->base.total_scratch) {
         wm.ScratchSpaceBasePointer =
            render_bo(stage_state->scratch_bo,
                      ffs(stage_state->per_thread_scratch) - 11);
      }

      wm.PixelShaderComputedDepth = writes_depth;
#endif

      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;

      /* _NEW_LINE */
      wm.LineStippleEnable = ctx->Line.StippleFlag;

      /* _NEW_POLYGON */
      wm.PolygonStippleEnable = ctx->Polygon.StippleFlag;
      wm.BarycentricInterpolationMode = wm_prog_data->barycentric_interp_modes;

      /* _NEW_BUFFERS */
      const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

      wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;

      /* _NEW_COLOR, _NEW_MULTISAMPLE */
      if (wm_prog_data->uses_kill ||
          _mesa_is_alpha_test_enabled(ctx) ||
          _mesa_is_alpha_to_coverage_enabled(ctx) ||
          wm_prog_data->uses_omask) {
         wm.PixelShaderKillsPixel = true;
      }

      /* _NEW_BUFFERS | _NEW_COLOR */
      if (brw_color_buffer_write_enabled(brw) || writes_depth ||
          wm_prog_data->has_side_effects || wm.PixelShaderKillsPixel) {
         wm.ThreadDispatchEnable = true;
      }

      if (multisampled_fbo) {
         /* _NEW_MULTISAMPLE */
         if (ctx->Multisample.Enabled)
            wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
         else
            wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;

         if (wm_prog_data->persample_dispatch)
            wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
         else
            wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
      } else {
         wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
         wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
      }

#if GEN_GEN >= 7
      wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif

      /* The "UAV access enable" bits are unnecessary on HSW because they only
       * seem to have an effect on the HW-assisted coherency mechanism which we
       * don't need, and the rasterization-related UAV_ONLY flag and the
       * DISPATCH_ENABLE bit can be set independently from it.
       * C.f. gen8_upload_ps_extra().
       *
       * BRW_NEW_FRAGMENT_PROGRAM | BRW_NEW_FS_PROG_DATA | _NEW_BUFFERS |
       * _NEW_COLOR
       */
#if GEN_IS_HASWELL
      if (!(brw_color_buffer_write_enabled(brw) || writes_depth) &&
          wm_prog_data->has_side_effects)
         wm.PSUAVonly = ON;
#endif

#if GEN_GEN >= 7
      /* BRW_NEW_FS_PROG_DATA */
      if (wm_prog_data->early_fragment_tests)
         wm.EarlyDepthStencilControl = EDSC_PREPS;
      else if (wm_prog_data->has_side_effects)
         wm.EarlyDepthStencilControl = EDSC_PSEXEC;
#endif
   }
}
static const struct brw_tracked_state genX(wm_state) = {
   .dirty = {
      .mesa = _NEW_LINE |
              _NEW_POLYGON |
              (GEN_GEN < 8 ? _NEW_BUFFERS |
                             _NEW_COLOR |
                             _NEW_MULTISAMPLE :
                             0) |
              (GEN_GEN < 7 ? _NEW_PROGRAM_CONSTANTS : 0),
      .brw = BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             (GEN_GEN < 7 ? BRW_NEW_BATCH : BRW_NEW_CONTEXT),
   },
   .emit = genX(upload_wm),
};

/* ---------------------------------------------------------------------- */
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
   pkt.KernelStartPointer = KSP(brw, stage_state->prog_offset);           \
   pkt.SamplerCount       =                                               \
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
   pkt.BindingTableEntryCount =                                           \
      stage_prog_data->binding_table.size_bytes / 4;                      \
   pkt.FloatingPointMode  = stage_prog_data->use_alt_mode;                \
                                                                          \
   if (stage_prog_data->total_scratch) {                                  \
      pkt.ScratchSpaceBasePointer =                                       \
         render_bo(stage_state->scratch_bo, 0);                           \
      pkt.PerThreadScratchSpace =                                         \
         ffs(stage_state->per_thread_scratch) - 11;                       \
   }                                                                      \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      stage_prog_data->dispatch_grf_start_reg;                            \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;
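/* INIT_THREAD_DISPATCH_FIELDS expects `stage_state`, `stage_prog_data` and
 * `vue_prog_data` to be in scope at the expansion site, as in
 * genX(upload_vs_state) below; it fills in the fields every VUE-stage
 * packet shares (kernel pointer, sampler and binding-table counts, scratch
 * space, and URB read parameters).
 */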
static void
genX(upload_vs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(brw->vs.base.prog_data);
   const struct brw_stage_prog_data *stage_prog_data = &vue_prog_data->base;

   assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);

#if GEN_GEN == 6
   /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
    * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
    *
    *   [DevSNB] A pipeline flush must be programmed prior to a 3DSTATE_VS
    *   command that causes the VS Function Enable to toggle.  Pipeline
    *   flush can be executed by sending a PIPE_CONTROL command with CS
    *   stall bit set and a post sync operation.
    *
    * We've already done such a flush at the start of state upload, so we
    * don't need to do another one here.
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), cvs) {
      if (stage_state->push_const_size != 0) {
         cvs.Buffer0Valid = true;
         cvs.PointertoVSConstantBuffer0 = stage_state->push_const_offset;
         cvs.VSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

   if (GEN_GEN == 7 && devinfo->is_ivybridge)
      gen7_emit_vs_workaround_flush(brw);

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_VS), vs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(VS_STATE), 32, &stage_state->state_offset, vs) {
#endif
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);

      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;

#if GEN_GEN < 6
      vs.GRFRegisterCount = DIV_ROUND_UP(vue_prog_data->total_grf, 16) - 1;
      vs.ConstantURBEntryReadLength = stage_prog_data->curb_read_length;
      vs.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2;

      vs.NumberofURBEntries = brw->urb.nr_vs_entries >> (GEN_GEN == 5 ? 2 : 0);
      vs.URBEntryAllocationSize = brw->urb.vsize - 1;

      vs.MaximumNumberofThreads =
         CLAMP(brw->urb.nr_vs_entries / 2, 1, devinfo->max_vs_threads) - 1;

      vs.StatisticsEnable = false;
      vs.SamplerStatePointer =
         instruction_ro_bo(brw->batch.bo, stage_state->sampler_offset);
#endif

#if GEN_GEN == 5
      /* Force single program flow on Ironlake.  We cannot reliably get
       * all applications working without it.  See:
       * https://bugs.freedesktop.org/show_bug.cgi?id=29172
       *
       * The most notable and reliably failing application is the Humus
       * demo "CelShading"
       */
      vs.SingleProgramFlow = true;
      vs.SamplerCount = 0; /* hardware requirement */
#endif

#if GEN_GEN >= 8
      vs.SIMD8DispatchEnable =
         vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8;

      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
#endif
   }

#if GEN_GEN == 6
   /* Based on my reading of the simulator, the VS constants don't get
    * pulled into the VS FF unit until an appropriate pipeline flush
    * happens, and instead the 3DSTATE_CONSTANT_VS packet just adds
    * references to them into a little FIFO.  The flushes are common,
    * but don't reliably happen between this and a 3DPRIMITIVE, causing
    * the primitive to use the wrong constants.  Then the FIFO
    * containing the constant setup gets added to again on the next
    * constants change, and eventually when a flush does happen the
    * unit is overwhelmed by constant changes and dies.
    *
    * To avoid this, send a PIPE_CONTROL down the line that will
    * update the unit immediately loading the constants.  The flush
    * type bits here were those set by the STATE_BASE_ADDRESS whose
    * move in a82a43e8d99e1715dd11c9c091b5ab734079b6a6 triggered the
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
#endif
}
static const struct brw_tracked_state genX(vs_state) = {
   .dirty = {
      .mesa = (GEN_GEN == 6 ? (_NEW_PROGRAM_CONSTANTS | _NEW_TRANSFORM) : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_VS_PROG_DATA |
             (GEN_GEN == 6 ? BRW_NEW_VERTEX_PROGRAM : 0) |
             (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                             BRW_NEW_PROGRAM_CACHE |
                             BRW_NEW_SAMPLER_STATE_TABLE |
                             BRW_NEW_URB_FENCE
                           : 0),
   },
   .emit = genX(upload_vs_state),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_cc_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   struct GENX(CC_VIEWPORT) ccv;
   uint32_t cc_vp_offset;
   uint32_t *cc_map =
      brw_state_batch(brw, 4 * GENX(CC_VIEWPORT_length) * viewport_count,
                      32, &cc_vp_offset);

   for (unsigned i = 0; i < viewport_count; i++) {
      /* _NEW_VIEWPORT | _NEW_TRANSFORM */
      const struct gl_viewport_attrib *vp = &ctx->ViewportArray[i];
      if (ctx->Transform.DepthClamp) {
         ccv.MinimumDepth = MIN2(vp->Near, vp->Far);
         ccv.MaximumDepth = MAX2(vp->Near, vp->Far);
      } else {
         ccv.MinimumDepth = 0.0;
         ccv.MaximumDepth = 1.0;
      }
      GENX(CC_VIEWPORT_pack)(NULL, cc_map, &ccv);
      cc_map += GENX(CC_VIEWPORT_length);
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
      ptr.CCViewportPointer = cc_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.CCViewportStateChange = 1;
      vp.PointertoCC_VIEWPORT = cc_vp_offset;
   }
#else
   brw->cc.vp_offset = cc_vp_offset;
   ctx->NewDriverState |= BRW_NEW_CC_VP;
#endif
}

const struct brw_tracked_state genX(cc_vp) = {
   .dirty = {
      .mesa = _NEW_TRANSFORM |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_cc_viewport)
};
/* ---------------------------------------------------------------------- */

static void
set_scissor_bits(const struct gl_context *ctx, int i,
                 bool render_to_fbo, unsigned fb_width, unsigned fb_height,
                 struct GENX(SCISSOR_RECT) *sc)
{
   int bbox[4];

   bbox[0] = MAX2(ctx->ViewportArray[i].X, 0);
   bbox[1] = MIN2(bbox[0] + ctx->ViewportArray[i].Width, fb_width);
   bbox[2] = MAX2(ctx->ViewportArray[i].Y, 0);
   bbox[3] = MIN2(bbox[2] + ctx->ViewportArray[i].Height, fb_height);
   _mesa_intersect_scissor_bounding_box(ctx, i, bbox);

   if (bbox[0] == bbox[1] || bbox[2] == bbox[3]) {
      /* If the scissor was out of bounds and got clamped to 0 width/height
       * at the bounds, the subtraction of 1 from maximums could produce a
       * negative number and thus not clip anything.  Instead, just provide
       * a min > max scissor inside the bounds, which produces the expected
       * no rendering.
       */
      sc->ScissorRectangleXMin = 1;
      sc->ScissorRectangleXMax = 0;
      sc->ScissorRectangleYMin = 1;
      sc->ScissorRectangleYMax = 0;
   } else if (render_to_fbo) {
      /* texmemory: Y=0=bottom */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = bbox[2];
      sc->ScissorRectangleYMax = bbox[3] - 1;
   } else {
      /* memory: Y=0=top */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = fb_height - bbox[3];
      sc->ScissorRectangleYMax = fb_height - bbox[2] - 1;
   }
}
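/* Worked example of the Y flip above (illustrative numbers, not from any
 * spec): with fb_height = 100 and a Mesa box of bbox = {10, 20, 30, 40}
 * (min inclusive, max exclusive), rendering to an FBO yields the inclusive
 * hardware rows Y = [30, 39], while rendering to a window yields
 * Y = [100 - 40, 100 - 30 - 1] = [60, 69]: the same ten rows, mirrored
 * vertically, because window-system buffers put Y=0 at the top while GL
 * FBOs put it at the bottom.
 */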
static void
genX(upload_scissor_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   struct GENX(SCISSOR_RECT) scissor;
   uint32_t scissor_state_offset;
   const unsigned int fb_width = _mesa_geometric_width(ctx->DrawBuffer);
   const unsigned int fb_height = _mesa_geometric_height(ctx->DrawBuffer);
   uint32_t *scissor_map;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   scissor_map = brw_state_batch(
      brw, GENX(SCISSOR_RECT_length) * sizeof(uint32_t) * viewport_count,
      32, &scissor_state_offset);

   /* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */

   /* The scissor only needs to handle the intersection of drawable and
    * scissor rect.  Clipping to the boundaries of static shared buffers
    * for front/back/depth is covered by looping over cliprects in brw_draw.c.
    *
    * Note that the hardware's coordinates are inclusive, while Mesa's min is
    * inclusive but max is exclusive.
    */
   for (unsigned i = 0; i < viewport_count; i++) {
      set_scissor_bits(ctx, i, render_to_fbo, fb_width, fb_height, &scissor);
      GENX(SCISSOR_RECT_pack)(
         NULL, scissor_map + i * GENX(SCISSOR_RECT_length), &scissor);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
      ptr.ScissorRectPointer = scissor_state_offset;
   }
}

static const struct brw_tracked_state genX(scissor_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_SCISSOR |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_scissor_state),
};
/* ---------------------------------------------------------------------- */

static void
brw_calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
                             float m00, float m11, float m30, float m31,
                             float *xmin, float *xmax,
                             float *ymin, float *ymax)
{
   /* According to the "Vertex X,Y Clamping and Quantization" section of the
    * Strips and Fans documentation:
    *
    * "The vertex X and Y screen-space coordinates are also /clamped/ to the
    *  fixed-point "guardband" range supported by the rasterization hardware"
    *
    * and
    *
    * "In almost all circumstances, if an object’s vertices are actually
    *  modified by this clamping (i.e., had X or Y coordinates outside of
    *  the guardband extent) the rendered object will not match the intended
    *  result.  Therefore software should take steps to ensure that this does
    *  not happen - e.g., by clipping objects such that they do not exceed
    *  these limits after the Drawing Rectangle is applied."
    *
    * I believe the fundamental restriction is that the rasterizer (in
    * the SF/WM stages) has a limit on the number of pixels that can be
    * rasterized.  We need to ensure any coordinates beyond the rasterizer
    * limit are handled by the clipper.  So effectively that limit becomes
    * the clipper's guardband size.
    *
    * It goes on to say:
    *
    * "In addition, in order to be correctly rendered, objects must have a
    *  screenspace bounding box not exceeding 8K in the X or Y direction.
    *  This additional restriction must also be comprehended by software,
    *  i.e., enforced by use of clipping."
    *
    * This makes no sense.  Gen7+ hardware supports 16K render targets,
    * and you definitely need to be able to draw polygons that fill the
    * surface.  Our assumption is that the rasterizer was limited to 8K
    * on Sandybridge, which only supports 8K surfaces, and it was actually
    * increased to 16K on Ivybridge and later.
    *
    * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
    */
   const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;

   if (m00 != 0 && m11 != 0) {
      /* First, we compute the screen-space render area */
      const float ss_ra_xmin = MIN3(        0, m30 + m00, m30 - m00);
      const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
      const float ss_ra_ymin = MIN3(        0, m31 + m11, m31 - m11);
      const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);

      /* We want the guardband to be centered on that */
      const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
      const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
      const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
      const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;

      /* Now we need it in native device coordinates */
      const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
      const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
      const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
      const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;

      /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
       * flipped upside-down.  X should be fine though.
       */
      assert(ndc_gb_xmin <= ndc_gb_xmax);
      *xmin = ndc_gb_xmin;
      *xmax = ndc_gb_xmax;
      *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
      *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
   } else {
      /* The viewport scales to 0, so nothing will be rendered. */
      *xmin = 0.0f;
      *xmax = 0.0f;
      *ymin = 0.0f;
      *ymax = 0.0f;
   }
}
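/* A quick sanity check of the math above (illustrative numbers): for a
 * 1920x1080 framebuffer with a full-screen viewport, m00 = 960 and
 * m30 = 960.  The screen-space render area in X is [0, 1920], centered at
 * 960, so with gb_size = 16384 the guardband spans [-15424, 17344] in
 * screen space, which maps back to about [-17.07, +17.07] in NDC
 * ((-15424 - 960) / 960 = -17.07).  Anything outside that range must be
 * handled by the clipper rather than the rasterizer.
 */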
static void
genX(upload_sf_clip_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float y_scale, y_bias;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   /* _NEW_BUFFERS */
   const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   const uint32_t fb_width = (float)_mesa_geometric_width(ctx->DrawBuffer);
   const uint32_t fb_height = (float)_mesa_geometric_height(ctx->DrawBuffer);

#if GEN_GEN >= 7
#define clv sfv
   struct GENX(SF_CLIP_VIEWPORT) sfv;
   uint32_t sf_clip_vp_offset;
   uint32_t *sf_clip_map =
      brw_state_batch(brw, GENX(SF_CLIP_VIEWPORT_length) * 4 * viewport_count,
                      64, &sf_clip_vp_offset);
#else
   struct GENX(SF_VIEWPORT) sfv;
   struct GENX(CLIP_VIEWPORT) clv;
   uint32_t sf_vp_offset, clip_vp_offset;
   uint32_t *sf_map =
      brw_state_batch(brw, GENX(SF_VIEWPORT_length) * 4 * viewport_count,
                      32, &sf_vp_offset);
   uint32_t *clip_map =
      brw_state_batch(brw, GENX(CLIP_VIEWPORT_length) * 4 * viewport_count,
                      32, &clip_vp_offset);
#endif

   /* _NEW_BUFFERS */
   if (render_to_fbo) {
      y_scale = 1.0;
      y_bias = 0;
   } else {
      y_scale = -1.0;
      y_bias = (float)fb_height;
   }

   for (unsigned i = 0; i < brw->clip.viewport_count; i++) {
      /* _NEW_VIEWPORT: Guardband Clipping */
      float scale[3], translate[3], gb_xmin, gb_xmax, gb_ymin, gb_ymax;
      _mesa_get_viewport_xform(ctx, i, scale, translate);

      sfv.ViewportMatrixElementm00 = scale[0];
      sfv.ViewportMatrixElementm11 = scale[1] * y_scale,
      sfv.ViewportMatrixElementm22 = scale[2],
      sfv.ViewportMatrixElementm30 = translate[0],
      sfv.ViewportMatrixElementm31 = translate[1] * y_scale + y_bias,
      sfv.ViewportMatrixElementm32 = translate[2],
      brw_calculate_guardband_size(fb_width, fb_height,
                                   sfv.ViewportMatrixElementm00,
                                   sfv.ViewportMatrixElementm11,
                                   sfv.ViewportMatrixElementm30,
                                   sfv.ViewportMatrixElementm31,
                                   &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

      clv.XMinClipGuardband = gb_xmin;
      clv.XMaxClipGuardband = gb_xmax;
      clv.YMinClipGuardband = gb_ymin;
      clv.YMaxClipGuardband = gb_ymax;

#if GEN_GEN < 6
      set_scissor_bits(ctx, i, render_to_fbo, fb_width, fb_height,
                       &sfv.ScissorRectangle);
#elif GEN_GEN >= 8
      /* _NEW_VIEWPORT | _NEW_BUFFERS: Screen Space Viewport
       * The hardware will take the intersection of the drawing rectangle,
       * scissor rectangle, and the viewport extents.  We don't need to be
       * smart, and can therefore just program the viewport extents.
       */
      const float viewport_Xmax =
         ctx->ViewportArray[i].X + ctx->ViewportArray[i].Width;
      const float viewport_Ymax =
         ctx->ViewportArray[i].Y + ctx->ViewportArray[i].Height;

      if (render_to_fbo) {
         sfv.XMinViewPort = ctx->ViewportArray[i].X;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = ctx->ViewportArray[i].Y;
         sfv.YMaxViewPort = viewport_Ymax - 1;
      } else {
         sfv.XMinViewPort = ctx->ViewportArray[i].X;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = fb_height - viewport_Ymax;
         sfv.YMaxViewPort = fb_height - ctx->ViewportArray[i].Y - 1;
      }
#endif

#if GEN_GEN >= 7
      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_map, &sfv);
      sf_clip_map += GENX(SF_CLIP_VIEWPORT_length);
#else
      GENX(SF_VIEWPORT_pack)(NULL, sf_map, &sfv);
      GENX(CLIP_VIEWPORT_pack)(NULL, clip_map, &clv);
      sf_map += GENX(SF_VIEWPORT_length);
      clip_map += GENX(CLIP_VIEWPORT_length);
#endif
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
      ptr.SFClipViewportPointer = sf_clip_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.SFViewportStateChange = 1;
      vp.CLIPViewportStateChange = 1;
      vp.PointertoCLIP_VIEWPORT = clip_vp_offset;
      vp.PointertoSF_VIEWPORT = sf_vp_offset;
   }
#else
   brw->sf.vp_offset = sf_vp_offset;
   brw->clip.vp_offset = clip_vp_offset;
   brw->ctx.NewDriverState |= BRW_NEW_SF_VP | BRW_NEW_CLIP_VP;
#endif
}

static const struct brw_tracked_state genX(sf_clip_viewport) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_VIEWPORT |
              (GEN_GEN <= 5 ? _NEW_SCISSOR : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_sf_clip_viewport),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_gs_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->gs.base;
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool active = brw->geometry_program;

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);
#if GEN_GEN >= 7
   const struct brw_gs_prog_data *gs_prog_data =
      brw_gs_prog_data(stage_prog_data);
#endif

#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_GS), cgs) {
      if (active && stage_state->push_const_size != 0) {
         cgs.Buffer0Valid = true;
         cgs.PointertoGSConstantBuffer0 = stage_state->push_const_offset;
         cgs.GSConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    *     "Note: Because of corruption in IVB:GT2, software needs to flush the
    *     whole fixed function pipeline when the GS enable changes value in
    *     the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (brw->gt == 2 && brw->gs.enabled != active)
      gen7_emit_cs_stall_flush(brw);
#endif

   if (active) {
      brw_batch_emit(brw, GENX(3DSTATE_GS), gs) {
         INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);

#if GEN_GEN >= 7
         gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
         gs.OutputTopology = gs_prog_data->output_topology;
         gs.ControlDataHeaderSize =
            gs_prog_data->control_data_header_size_hwords;

         gs.InstanceControl = gs_prog_data->invocations - 1;
         gs.DispatchMode = vue_prog_data->dispatch_mode;

         gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;

         gs.ControlDataFormat = gs_prog_data->control_data_format;
#endif

         /* Note: the meaning of the GEN7_GS_REORDER_TRAILING bit changes
          * between Ivy Bridge and Haswell.
          *
          * On Ivy Bridge, setting this bit causes the vertices of a triangle
          * strip to be delivered to the geometry shader in an order that does
          * not strictly follow the OpenGL spec, but preserves triangle
          * orientation.  For example, if the vertices are (1, 2, 3, 4, 5),
          * then the geometry shader sees triangles:
          *
          * (1, 2, 3), (2, 4, 3), (3, 4, 5)
          *
          * (Clearing the bit is even worse, because it fails to preserve
          * orientation).
          *
          * Triangle strips with adjacency are always ordered in a way that
          * preserves triangle orientation but does not strictly follow the
          * OpenGL spec, regardless of the setting of this bit.
          *
          * On Haswell, both triangle strips and triangle strips with
          * adjacency are always ordered in a way that preserves triangle
          * orientation.  Setting this bit causes the ordering to strictly
          * follow the OpenGL spec.
          *
          * So in either case we want to set the bit.  Unfortunately on Ivy
          * Bridge this will get the order close to correct but not perfect.
          */
         gs.ReorderMode = TRAILING;
         gs.MaximumNumberofThreads =
            GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                         : (devinfo->max_gs_threads - 1);

#if GEN_GEN < 7
         gs.SOStatisticsEnable = true;
         gs.RenderingEnabled = 1;
         if (brw->geometry_program->info.has_transform_feedback_varyings)
            gs.SVBIPayloadEnable = true;

         /* GEN6_GS_SPF_MODE and GEN6_GS_VECTOR_MASK_ENABLE are enabled as it
          * was previously done for gen6.
          *
          * TODO: test with both disabled to see if the HW is behaving
          *       as expected, like in gen7.
          */
         gs.SingleProgramFlow = true;
         gs.VectorMaskEnable = true;
#endif

#if GEN_GEN >= 8
         gs.ExpectedVertexCount = gs_prog_data->vertices_in;

         if (gs_prog_data->static_vertex_count != -1) {
            gs.StaticOutput = true;
            gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
         }
         gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

         gs.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;

         const int urb_entry_write_offset = 1;
         const uint32_t urb_entry_output_length =
            DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
            urb_entry_write_offset;

         gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
         gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
#endif
      }
#if GEN_GEN < 7
   } else if (brw->ff_gs.prog_active) {
      /* In gen6, transform feedback for the VS stage is done with an ad-hoc
       * GS program.  This function provides the needed 3DSTATE_GS for this.
       */
      upload_gs_state_for_tf(brw);
#endif
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_GS), gs) {
         gs.StatisticsEnable = true;
#if GEN_GEN < 7
         gs.RenderingEnabled = true;
#endif

#if GEN_GEN < 8
         gs.DispatchGRFStartRegisterForURBData = 1;
#if GEN_GEN < 7
         gs.IncludeVertexHandles = true;
#endif
#endif
      }
   }

   brw->gs.enabled = active;
}

static const struct brw_tracked_state genX(gs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN < 7 ? _NEW_PROGRAM_CONSTANTS : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_GS_PROG_DATA |
               (GEN_GEN < 7 ? BRW_NEW_FF_GS_PROG_DATA : 0),
   },
   .emit = genX(upload_gs_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static GLenum
fix_dual_blend_alpha_to_one(GLenum function)
{
   switch (function) {
   case GL_SRC1_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_SRC1_ALPHA:
      return GL_ZERO;
   }

   return function;
}

#define blend_factor(x) brw_translate_blend_factor(x)
#define blend_eqn(x) brw_translate_blend_equation(x)
static void
genX(upload_blend_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   int size;

   /* We need at least one BLEND_STATE written, because we might do
    * thread dispatch even if _NumColorDrawBuffers is 0 (for example
    * for computed depth or alpha test), which will do an FB write
    * with render target 0, which will reference BLEND_STATE[0] for
    * alpha test enable.
    */
   int nr_draw_buffers = ctx->DrawBuffer->_NumColorDrawBuffers;
   if (nr_draw_buffers == 0 && ctx->Color.AlphaEnabled)
      nr_draw_buffers = 1;

   size = GENX(BLEND_STATE_ENTRY_length) * 4 * nr_draw_buffers;
#if GEN_GEN >= 8
   size += GENX(BLEND_STATE_length) * 4;
#endif

   uint32_t *blend_map;
   blend_map = brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);

#if GEN_GEN >= 8
   struct GENX(BLEND_STATE) blend = { 0 };
   {
#else
   for (int i = 0; i < nr_draw_buffers; i++) {
      struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#define blend entry
#endif

      /* OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       * integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       * operations are skipped."
       */
      if (!(ctx->DrawBuffer->_IntegerBuffers & 0x1)) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            if (ctx->Multisample.SampleAlphaToCoverage) {
               blend.AlphaToCoverageEnable = true;
               blend.AlphaToCoverageDitherEnable = GEN_GEN >= 7;
            }
            if (ctx->Multisample.SampleAlphaToOne)
               blend.AlphaToOneEnable = true;
         }

         /* _NEW_COLOR */
         if (ctx->Color.AlphaEnabled) {
            blend.AlphaTestEnable = true;
            blend.AlphaTestFunction =
               intel_translate_compare_func(ctx->Color.AlphaFunc);
         }

         if (ctx->Color.DitherFlag) {
            blend.ColorDitherEnable = true;
         }
      }

#if GEN_GEN >= 8
      for (int i = 0; i < nr_draw_buffers; i++) {
         struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#else
      {
#endif
         /* _NEW_BUFFERS */
         struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

         /* Used for implementing the following bit of GL_EXT_texture_integer:
          * "Per-fragment operations that require floating-point color
          * components, including multisample alpha operations, alpha test,
          * blending, and dithering, have no effect when the corresponding
          * colors are written to an integer color buffer."
          */
         bool integer = ctx->DrawBuffer->_IntegerBuffers & (0x1 << i);

         /* _NEW_COLOR */
         if (ctx->Color.ColorLogicOpEnabled) {
            GLenum rb_type = rb ? _mesa_get_format_datatype(rb->Format)
                                : GL_UNSIGNED_NORMALIZED;
            WARN_ONCE(ctx->Color.LogicOp != GL_COPY &&
                      rb_type != GL_UNSIGNED_NORMALIZED &&
                      rb_type != GL_FLOAT, "Ignoring %s logic op on %s "
                      "renderbuffer\n",
                      _mesa_enum_to_string(ctx->Color.LogicOp),
                      _mesa_enum_to_string(rb_type));
            if (GEN_GEN >= 8 || rb_type == GL_UNSIGNED_NORMALIZED) {
               entry.LogicOpEnable = true;
               entry.LogicOpFunction =
                  intel_translate_logic_op(ctx->Color.LogicOp);
            }
         } else if (ctx->Color.BlendEnabled & (1 << i) && !integer &&
                    !ctx->Color._AdvancedBlendMode) {
            GLenum eqRGB = ctx->Color.Blend[i].EquationRGB;
            GLenum eqA = ctx->Color.Blend[i].EquationA;
            GLenum srcRGB = ctx->Color.Blend[i].SrcRGB;
            GLenum dstRGB = ctx->Color.Blend[i].DstRGB;
            GLenum srcA = ctx->Color.Blend[i].SrcA;
            GLenum dstA = ctx->Color.Blend[i].DstA;

            if (eqRGB == GL_MIN || eqRGB == GL_MAX)
               srcRGB = dstRGB = GL_ONE;

            if (eqA == GL_MIN || eqA == GL_MAX)
               srcA = dstA = GL_ONE;

            /* Due to hardware limitations, the destination may have
             * information in an alpha channel even when the format specifies
             * no alpha channel.  In order to avoid getting any incorrect
             * blending due to that alpha channel, coerce the blend factors to
             * values that will not read the alpha channel, but will instead
             * use the correct implicit value for alpha.
             */
            if (rb && !_mesa_base_format_has_channel(rb->_BaseFormat,
                                                     GL_TEXTURE_ALPHA_TYPE)) {
               srcRGB = brw_fix_xRGB_alpha(srcRGB);
               srcA = brw_fix_xRGB_alpha(srcA);
               dstRGB = brw_fix_xRGB_alpha(dstRGB);
               dstA = brw_fix_xRGB_alpha(dstA);
            }

            /* From the BLEND_STATE docs, DWord 0, Bit 29 (AlphaToOne Enable):
             * "If Dual Source Blending is enabled, this bit must be disabled."
             *
             * We override SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO,
             * and leave it enabled anyway.
             */
            if (ctx->Color.Blend[i]._UsesDualSrc && blend.AlphaToOneEnable) {
               srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
               srcA = fix_dual_blend_alpha_to_one(srcA);
               dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
               dstA = fix_dual_blend_alpha_to_one(dstA);
            }

            entry.ColorBufferBlendEnable = true;
            entry.DestinationBlendFactor = blend_factor(dstRGB);
            entry.SourceBlendFactor = blend_factor(srcRGB);
            entry.DestinationAlphaBlendFactor = blend_factor(dstA);
            entry.SourceAlphaBlendFactor = blend_factor(srcA);
            entry.ColorBlendFunction = blend_eqn(eqRGB);
            entry.AlphaBlendFunction = blend_eqn(eqA);

            if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB)
               blend.IndependentAlphaBlendEnable = true;
         }

         /* See section 8.1.6 "Pre-Blend Color Clamping" of the
          * SandyBridge PRM Volume 2 Part 1 for HW requirements.
          *
          * We do our ARB_color_buffer_float CLAMP_FRAGMENT_COLOR
          * clamping in the fragment shader.  For its clamping of
          * blending, the spec says:
          *
          *     "RESOLVED: For fixed-point color buffers, the inputs and
          *      the result of the blending equation are clamped.  For
          *      floating-point color buffers, no clamping occurs."
          *
          * So, generally, we want clamping to the render target's range.
          * And, good news, the hardware tables for both pre- and
          * post-blend color clamping are either ignored, or any are
          * allowed, or clamping is required but RT range clamping is a
          * valid option.
          */
         entry.PreBlendColorClampEnable = true;
         entry.PostBlendColorClampEnable = true;
         entry.ColorClampRange = COLORCLAMP_RTFORMAT;

         entry.WriteDisableRed   = !ctx->Color.ColorMask[i][0];
         entry.WriteDisableGreen = !ctx->Color.ColorMask[i][1];
         entry.WriteDisableBlue  = !ctx->Color.ColorMask[i][2];
         entry.WriteDisableAlpha = !ctx->Color.ColorMask[i][3];

#if GEN_GEN >= 8
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[1 + i * 2], &entry);
#else
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[i * 2], &entry);
#endif
      }
   }

#if GEN_GEN >= 8
   GENX(BLEND_STATE_pack)(NULL, blend_map, &blend);
#else
#undef blend
#endif

#if GEN_GEN < 7
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoBLEND_STATE = brw->cc.blend_state_offset;
      ptr.BLEND_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
      ptr.BlendStatePointer = brw->cc.blend_state_offset;
#if GEN_GEN >= 8
      ptr.BlendStatePointerValid = true;
#endif
   }
#endif
}

static const struct brw_tracked_state genX(blend_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_STATE_BASE_ADDRESS,
   },
   .emit = genX(upload_blend_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};

static void
upload_constant_state(struct brw_context *brw,
                      struct brw_stage_state *stage_state,
                      bool active, uint32_t stage)
{
   UNUSED uint32_t mocs = GEN_GEN < 8 ? GEN7_MOCS_L3 : 0;
   active = active && stage_state->push_const_size != 0;

   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), pkt) {
      pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
      if (active) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         pkt.ConstantBody.ReadLength[2] = stage_state->push_const_size;
         pkt.ConstantBody.Buffer[2] =
            render_ro_bo(brw->curbe.curbe_bo, stage_state->push_const_offset);
#else
         pkt.ConstantBody.ReadLength[0] = stage_state->push_const_size;
         pkt.ConstantBody.Buffer[0].offset =
            stage_state->push_const_offset | mocs;
#endif
      }
   }

   brw->ctx.NewDriverState |= GEN_GEN >= 9 ? BRW_NEW_SURFACES : 0;
}
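/* Note that upload_constant_state always builds the packet through the
 * GENX(3DSTATE_CONSTANT_VS) template and then patches the subopcode:
 * the gen7+ 3DSTATE_CONSTANT_XS commands all share one layout, so e.g.
 * stage == MESA_SHADER_GEOMETRY overwrites the subopcode with 22 from
 * push_constant_opcodes[], turning the emitted packet into
 * 3DSTATE_CONSTANT_GS on the fly.
 */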
static void
genX(upload_vs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* _BRW_NEW_VERTEX_PROGRAM */
   const struct brw_program *vp = brw_program_const(brw->vertex_program);
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_VERTEX);
   gen6_upload_push_constants(brw, &vp->program, prog_data, stage_state);

   if (GEN_GEN == 7 && !GEN_IS_HASWELL && !brw->is_baytrail)
      gen7_emit_vs_workaround_flush(brw);

   upload_constant_state(brw, stage_state, true /* active */,
                         MESA_SHADER_VERTEX);
}

static const struct brw_tracked_state genX(vs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_VERTEX_PROGRAM |
               BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(upload_vs_push_constants),
};
static void
genX(upload_gs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->gs.base;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   const struct brw_program *gp = brw_program_const(brw->geometry_program);

   if (gp) {
      /* BRW_NEW_GS_PROG_DATA */
      struct brw_stage_prog_data *prog_data = brw->gs.base.prog_data;

      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_GEOMETRY);
      gen6_upload_push_constants(brw, &gp->program, prog_data, stage_state);
   }

   upload_constant_state(brw, stage_state, gp, MESA_SHADER_GEOMETRY);
}

static const struct brw_tracked_state genX(gs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_GS_PROG_DATA |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION,
   },
   .emit = genX(upload_gs_push_constants),
};
static void
genX(upload_wm_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_program *fp = brw_program_const(brw->fragment_program);
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);

   gen6_upload_push_constants(brw, &fp->program, prog_data, stage_state);

   upload_constant_state(brw, stage_state, true, MESA_SHADER_FRAGMENT);
}

static const struct brw_tracked_state genX(wm_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION,
   },
   .emit = genX(upload_wm_push_constants),
};
/* ---------------------------------------------------------------------- */

static unsigned
genX(determine_sample_mask)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float coverage = 1.0f;
   bool coverage_invert = false;
   unsigned sample_mask = ~0u;

   /* BRW_NEW_NUM_SAMPLES */
   unsigned num_samples = brw->num_samples;

   if (_mesa_is_multisample_enabled(ctx)) {
      if (ctx->Multisample.SampleCoverage) {
         coverage = ctx->Multisample.SampleCoverageValue;
         coverage_invert = ctx->Multisample.SampleCoverageInvert;
      }
      if (ctx->Multisample.SampleMask) {
         sample_mask = ctx->Multisample.SampleMaskValue;
      }
   }

   if (num_samples > 1) {
      int coverage_int = (int) (num_samples * coverage + 0.5f);
      uint32_t coverage_bits = (1 << coverage_int) - 1;
      if (coverage_invert)
         coverage_bits ^= (1 << num_samples) - 1;
      return coverage_bits & sample_mask;
   } else {
      return 1;
   }
}
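/* Worked example (illustrative): with num_samples = 8,
 * SampleCoverageValue = 0.5 and no inversion, coverage_int = 4 and
 * coverage_bits = 0b00001111, enabling the low four samples.  With
 * SampleCoverageInvert set, the XOR with (1 << 8) - 1 flips this to
 * 0b11110000.  The result is then ANDed with any glSampleMaski() mask.
 */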
static void
genX(emit_3dstate_multisample2)(struct brw_context *brw,
                                unsigned num_samples)
{
   assert(brw->num_samples <= 16);

   unsigned log2_samples = ffs(MAX2(num_samples, 1)) - 1;
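   /* The packet wants the log2 of the sample count, e.g. ffs(4) - 1 = 2
    * for 4x MSAA; the MAX2 keeps the expression at 0 for the
    * single-sampled (num_samples == 0 or 1) case.
    */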
   brw_batch_emit(brw, GENX(3DSTATE_MULTISAMPLE), multi) {
      multi.PixelLocation = CENTER;
      multi.NumberofMultisamples = log2_samples;

#if GEN_GEN == 6
      GEN_SAMPLE_POS_4X(multi.Sample);
#elif GEN_GEN == 7
      switch (num_samples) {
      case 1:
         GEN_SAMPLE_POS_1X(multi.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(multi.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(multi.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(multi.Sample);
         break;
      default:
         break;
      }
#endif
   }
}
static void
genX(upload_multisample_state)(struct brw_context *brw)
{
   genX(emit_3dstate_multisample2)(brw, brw->num_samples);

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = genX(determine_sample_mask)(brw);
   }
}

static const struct brw_tracked_state genX(multisample_state) = {
   .dirty = {
      .mesa = _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_NUM_SAMPLES,
   },
   .emit = genX(upload_multisample_state)
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_color_calc_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_state_emit(brw, GENX(COLOR_CALC_STATE), 64, &brw->cc.state_offset, cc) {
      /* _NEW_COLOR */
      cc.AlphaTestFormat = ALPHATEST_UNORM8;
      UNCLAMPED_FLOAT_TO_UBYTE(cc.AlphaReferenceValueAsUNORM8,
                               ctx->Color.AlphaRef);

      /* _NEW_STENCIL */
      cc.StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      cc.BackfaceStencilReferenceValue =
         _mesa_get_stencil_ref(ctx, ctx->Stencil._BackFace);

      /* _NEW_COLOR */
      cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];
   }

   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.ColorCalcStatePointer = brw->cc.state_offset;
#if GEN_GEN != 7
      ptr.ColorCalcStatePointerValid = true;
#endif
   }
}

static const struct brw_tracked_state genX(color_calc_state) = {
   .dirty = {
      .mesa = _NEW_COLOR |
              _NEW_STENCIL,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_STATE_BASE_ADDRESS,
   },
   .emit = genX(upload_color_calc_state),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_sbe)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
#if GEN_GEN >= 8
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = { { 0 } };
#else
#define attr_overrides sbe.Attribute
#endif
   uint32_t urb_entry_read_length;
   uint32_t urb_entry_read_offset;
   uint32_t point_sprite_enables;

   brw_batch_emit(brw, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* _NEW_BUFFERS */
      bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);

      /* _NEW_POINT
       *
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) != render_to_fbo)
         sbe.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      else
         sbe.PointSpriteTextureCoordinateOrigin = UPPERLEFT;

      /* _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM,
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM |
       * BRW_NEW_GS_PROG_DATA | BRW_NEW_PRIMITIVE | BRW_NEW_TES_PROG_DATA |
       * BRW_NEW_VUE_MAP_GEOM_OUT
       */
      genX(calculate_attr_overrides)(brw,
                                     attr_overrides,
                                     &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);

      /* Typically, the URB entry read length and offset should be programmed
       * in 3DSTATE_VS and 3DSTATE_GS; SBE inherits it from the last active
       * stage which produces geometry.  However, we don't know the proper
       * value until we call calculate_attr_overrides().
       *
       * To fit with our existing code, we override the inherited values and
       * specify it here directly, as we did on previous generations.
       */
      sbe.VertexURBEntryReadLength = urb_entry_read_length;
      sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
      sbe.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;

#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      /* prepare the active component dwords */
      int input_index = 0;
      for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
         if (!(brw->fragment_program->info.inputs_read &
               BITFIELD64_BIT(attr))) {
            continue;
         }

         assert(input_index < 32);

         sbe.AttributeActiveComponentFormat[input_index] =
            ACTIVE_COMPONENT_XYZW;
         ++input_index;
      }
#endif
   }

#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
#endif

#undef attr_overrides

static const struct brw_tracked_state genX(sbe_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_LIGHT |
              _NEW_POINT |
              _NEW_POLYGON |
              _NEW_PROGRAM,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_VUE_MAP_GEOM_OUT |
             (GEN_GEN == 7 ? BRW_NEW_PRIMITIVE
                           : 0),
   },
   .emit = genX(upload_sbe),
};
/* ---------------------------------------------------------------------- */

/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries containing a SO_DECL per
 * stream.  We only have one stream of rendering coming out of the GS unit,
 * so we only emit stream 0 (low 16 bits) SO_DECLs.
 */
static void
genX(upload_3dstate_so_decl_list)(struct brw_context *brw,
                                  const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < linked_xfb_info->NumOutputs; i++) {
      const struct gl_transform_feedback_output *output =
         &linked_xfb_info->Outputs[i];
      const int buffer = output->OutputBuffer;
      const int varying = output->OutputRegister;
      const unsigned stream_id = output->StreamId;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->DstOffset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->OutputBuffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }

      next_offset[buffer] = output->DstOffset + output->NumComponents;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->OutputBuffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->NumComponents) - 1) << output->ComponentOffset,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   uint32_t *dw;
   dw = brw_batch_emitn(brw, GENX(3DSTATE_SO_DECL_LIST), 3 + 2 * max_decls,
                        .StreamtoBufferSelects0 = buffer_mask[0],
                        .StreamtoBufferSelects1 = buffer_mask[1],
                        .StreamtoBufferSelects2 = buffer_mask[2],
                        .StreamtoBufferSelects3 = buffer_mask[3],
                        .NumEntries0 = decls[0],
                        .NumEntries1 = decls[1],
                        .NumEntries2 = decls[2],
                        .NumEntries3 = decls[3]);

   for (int i = 0; i < max_decls; i++) {
      GENX(SO_DECL_ENTRY_pack)(
         brw, dw + 2 + i * 2,
         &(struct GENX(SO_DECL_ENTRY)) {
            .Stream0Decl = so_decl[0][i],
            .Stream1Decl = so_decl[1][i],
            .Stream2Decl = so_decl[2][i],
            .Stream3Decl = so_decl[3][i],
         });
   }
}
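/* Hole example (illustrative): if a buffer's next_offset is 4 and the next
 * output has DstOffset = 10, skip_components starts at 6 and the loop above
 * emits one 4-component hole (ComponentMask = 0xf) followed by a
 * 2-component hole (ComponentMask = 0x3) before the real varying's SO_DECL
 * is written at offset 10.
 */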
static void
genX(upload_3dstate_so_buffers)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
#if GEN_GEN < 8
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
#else
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) xfb_obj;
   uint32_t mocs_wb = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
#endif

   /* Set up the up to 4 output buffers.  These are the ranges defined in the
    * gl_transform_feedback_object.
    */
   for (int i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);

      if (!bufferobj) {
         brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
            sob.SOBufferIndex = i;
         }
         continue;
      }

      uint32_t start = xfb_obj->Offset[i];
      assert(start % 4 == 0);
      uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, bufferobj, start, end - start);
      assert(end <= bo->size);

      brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
         sob.SOBufferIndex = i;

         sob.SurfaceBaseAddress = render_bo(bo, start);
#if GEN_GEN < 8
         sob.SurfacePitch = linked_xfb_info->Buffers[i].Stride * 4;
         sob.SurfaceEndAddress = render_bo(bo, end);
#else
         sob.SOBufferEnable = true;
         sob.StreamOffsetWriteEnable = true;
         sob.StreamOutputBufferOffsetAddressEnable = true;
         sob.SOBufferMOCS = mocs_wb;

         sob.SurfaceSize = MAX2(xfb_obj->Size[i] / 4, 1) - 1;
         sob.StreamOutputBufferOffsetAddress =
            instruction_bo(brw_obj->offset_bo, i * sizeof(uint32_t));

         if (brw_obj->zero_offsets) {
            /* Zero out the offset and write that to offset_bo */
            sob.StreamOffset = 0;
         } else {
            /* Use offset_bo as the "Stream Offset." */
            sob.StreamOffset = 0xFFFFFFFF;
         }
#endif
      }
   }

#if GEN_GEN >= 8
   brw_obj->zero_offsets = false;
#endif
}
static bool
query_active(struct gl_query_object *q)
{
   return q && q->Active;
}

static void
genX(upload_3dstate_streamout)(struct brw_context *brw, bool active,
                               const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   brw_batch_emit(brw, GENX(3DSTATE_STREAMOUT), sos) {
      if (active) {
         int urb_entry_read_offset = 0;
         int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
            urb_entry_read_offset;

         sos.SOFunctionEnable = true;
         sos.SOStatisticsEnable = true;

         /* BRW_NEW_RASTERIZER_DISCARD */
         if (ctx->RasterDiscard) {
            if (!query_active(ctx->Query.PrimitivesGenerated[0])) {
               sos.RenderingDisable = true;
            } else {
               perf_debug("Rasterizer discard with a GL_PRIMITIVES_GENERATED "
                          "query active relies on the clipper.");
            }
         }

         /* _NEW_LIGHT */
         if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
            sos.ReorderMode = TRAILING;

#if GEN_GEN < 8
         sos.SOBufferEnable0 = xfb_obj->Buffers[0] != NULL;
         sos.SOBufferEnable1 = xfb_obj->Buffers[1] != NULL;
         sos.SOBufferEnable2 = xfb_obj->Buffers[2] != NULL;
         sos.SOBufferEnable3 = xfb_obj->Buffers[3] != NULL;
#else
         const struct gl_transform_feedback_info *linked_xfb_info =
            xfb_obj->program->sh.LinkedTransformFeedback;
         /* Set buffer pitches; 0 means unbound. */
         if (xfb_obj->Buffers[0])
            sos.Buffer0SurfacePitch = linked_xfb_info->Buffers[0].Stride * 4;
         if (xfb_obj->Buffers[1])
            sos.Buffer1SurfacePitch = linked_xfb_info->Buffers[1].Stride * 4;
         if (xfb_obj->Buffers[2])
            sos.Buffer2SurfacePitch = linked_xfb_info->Buffers[2].Stride * 4;
         if (xfb_obj->Buffers[3])
            sos.Buffer3SurfacePitch = linked_xfb_info->Buffers[3].Stride * 4;
#endif

         /* We always read the whole vertex.  This could be reduced at some
          * point by reading less and offsetting the register index in the
          * SO_DECLs.
          */
         sos.Stream0VertexReadOffset = urb_entry_read_offset;
         sos.Stream0VertexReadLength = urb_entry_read_length - 1;
         sos.Stream1VertexReadOffset = urb_entry_read_offset;
         sos.Stream1VertexReadLength = urb_entry_read_length - 1;
         sos.Stream2VertexReadOffset = urb_entry_read_offset;
         sos.Stream2VertexReadLength = urb_entry_read_length - 1;
         sos.Stream3VertexReadOffset = urb_entry_read_offset;
         sos.Stream3VertexReadLength = urb_entry_read_length - 1;
      }
   }
}

static void
genX(upload_sol)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   bool active = _mesa_is_xfb_active_and_unpaused(ctx);

   if (active) {
      genX(upload_3dstate_so_buffers)(brw);

      /* BRW_NEW_VUE_MAP_GEOM_OUT */
      genX(upload_3dstate_so_decl_list)(brw, &brw->vue_map_geom_out);
   }

   /* Finally, set up the SOL stage.  This command must always follow updates
    * to the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST)
    * or MMIO register updates (currently performed by the kernel at each
    * batch emit).
    */
   genX(upload_3dstate_streamout)(brw, active, &brw->vue_map_geom_out);
}

static const struct brw_tracked_state genX(sol_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_RASTERIZER_DISCARD |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               BRW_NEW_TRANSFORM_FEEDBACK,
   },
   .emit = genX(upload_sol),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_ps)(struct brw_context *brw)
{
   UNUSED const struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   const struct brw_stage_state *stage_state = &brw->wm.base;

   brw_batch_emit(brw, GENX(3DSTATE_PS), ps) {
      /* Initialize the execution mask with VMask.  Otherwise, derivatives
       * are incorrect for subspans where some of the pixels are unlit.  We
       * believe the bit just didn't take effect in previous generations.
       */
      ps.VectorMaskEnable = GEN_GEN >= 8;

      ps.SamplerCount =
         DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);

      /* BRW_NEW_FS_PROG_DATA */
      ps.BindingTableEntryCount = prog_data->base.binding_table.size_bytes / 4;

      if (prog_data->base.use_alt_mode)
         ps.FloatingPointMode = Alternate;

      /* Haswell requires the sample mask to be set in this packet as well
       * as in 3DSTATE_SAMPLE_MASK; the values should match.
       */

      /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
#if GEN_IS_HASWELL
      ps.SampleMask = genX(determine_sample_mask(brw));
#endif

      /* 3DSTATE_PS expects the number of threads per PSD, which is always
       * 64; it implicitly scales for different GT levels (which have some #
       * of PSDs).
       *
       * In Gen8 the format is U8-2 whereas in Gen9 it is U8-1.
       */
#if GEN_GEN >= 9
      ps.MaximumNumberofThreadsPerPSD = 64 - 1;
#elif GEN_GEN >= 8
      ps.MaximumNumberofThreadsPerPSD = 64 - 2;
#else
      ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
#endif

      if (prog_data->base.nr_params > 0)
         ps.PushConstantEnable = true;

#if GEN_GEN < 8
      /* From the IVB PRM, volume 2 part 1, page 287:
       * "This bit is inserted in the PS payload header and made available to
       * the DataPort (either via the message header or via header bypass) to
       * indicate that oMask data (one or two phases) is included in Render
       * Target Write messages. If present, the oMask data is used to mask
       * off samples."
       */
      ps.oMaskPresenttoRenderTarget = prog_data->uses_omask;

      /* The hardware wedges if you have this bit set but don't turn on any
       * dual source blend factors.
       *
       * BRW_NEW_FS_PROG_DATA | _NEW_COLOR
       */
      ps.DualSourceBlendEnable = prog_data->dual_src_blend &&
                                 (ctx->Color.BlendEnabled & 1) &&
                                 ctx->Color.Blend[0]._UsesDualSrc;

      /* BRW_NEW_FS_PROG_DATA */
      ps.AttributeEnable = (prog_data->num_varying_inputs != 0);
#endif

      /* From the documentation for this packet:
       * "If the PS kernel does not need the Position XY Offsets to
       *  compute a Position Value, then this field should be programmed
       *  to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       *  to compute a Position XY value, this field should match Position
       *  ZW Interpolation Mode to ensure a consistent position.xyzw
       *  computation."
       *
       * We only require XY sample offsets.  So, this recommendation doesn't
       * look useful at the moment.  We might need this in future.
       */
      if (prog_data->uses_pos_offset)
         ps.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         ps.PositionXYOffsetSelect = POSOFFSET_NONE;

      ps.RenderTargetFastClearEnable = brw->wm.fast_clear_op;
      ps._8PixelDispatchEnable = prog_data->dispatch_8;
      ps._16PixelDispatchEnable = prog_data->dispatch_16;
      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         prog_data->base.dispatch_grf_start_reg;
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         prog_data->dispatch_grf_start_reg_2;

      ps.KernelStartPointer0 = stage_state->prog_offset;
      ps.KernelStartPointer2 = stage_state->prog_offset +
         prog_data->prog_offset_2;

      if (prog_data->base.total_scratch) {
         ps.ScratchSpaceBasePointer =
            render_bo(stage_state->scratch_bo,
                      ffs(stage_state->per_thread_scratch) - 11);
      }
   }
}

static const struct brw_tracked_state genX(ps_state) = {
   .dirty = {
      .mesa  = _NEW_MULTISAMPLE |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR
                            : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_ps),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_hs_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->tcs.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   /* BRW_NEW_TCS_PROG_DATA */
   struct brw_tcs_prog_data *tcs_prog_data =
      brw_tcs_prog_data(stage_prog_data);

   if (!tcs_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs);
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs) {
         INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);

         hs.InstanceCount = tcs_prog_data->instances - 1;
         hs.IncludeVertexHandles = true;

         hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      }
   }
}

static const struct brw_tracked_state genX(hs_state) = {
   .dirty = {
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_hs_state),
};

static void
genX(upload_ds_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tes.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(stage_prog_data);
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   if (!tes_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_DS), ds);
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_DS), ds) {
         INIT_THREAD_DISPATCH_FIELDS(ds, Patch);

         ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
         ds.ComputeWCoordinateEnable =
            tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

#if GEN_GEN >= 8
         if (vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8)
            ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
         ds.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;
#endif
      }
   }
}

static const struct brw_tracked_state genX(ds_state) = {
   .dirty = {
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_ds_state),
};
/* ---------------------------------------------------------------------- */

static void
upload_te_state(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->tess_eval_program;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(brw->tes.base.prog_data);

   if (active) {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te) {
         te.Partitioning = tes_prog_data->partitioning;
         te.OutputTopology = tes_prog_data->output_topology;
         te.TEDomain = tes_prog_data->domain;
         te.TEEnable = true;
         te.MaximumTessellationFactorOdd = 63.0;
         te.MaximumTessellationFactorNotOdd = 64.0;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te);
   }
}

static const struct brw_tracked_state genX(te_state) = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = upload_te_state,
};

/* ---------------------------------------------------------------------- */

static void
genX(upload_tes_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tes.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct brw_program *tep = brw_program_const(brw->tess_eval_program);

   if (tep) {
      /* BRW_NEW_TES_PROG_DATA */
      const struct brw_stage_prog_data *prog_data = brw->tes.base.prog_data;
      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_TESS_EVAL);
      gen6_upload_push_constants(brw, &tep->program, prog_data, stage_state);
   }

   upload_constant_state(brw, stage_state, tep, MESA_SHADER_TESS_EVAL);
}

static const struct brw_tracked_state genX(tes_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_tes_push_constants),
};

static void
genX(upload_tcs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tcs.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct brw_program *tcp = brw_program_const(brw->tess_ctrl_program);
   bool active = brw->tess_eval_program;

   if (active) {
      /* BRW_NEW_TCS_PROG_DATA */
      const struct brw_stage_prog_data *prog_data = brw->tcs.base.prog_data;

      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_TESS_CTRL);
      gen6_upload_push_constants(brw, &tcp->program, prog_data, stage_state);
   }

   upload_constant_state(brw, stage_state, active, MESA_SHADER_TESS_CTRL);
}

static const struct brw_tracked_state genX(tcs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_DEFAULT_TESS_LEVELS |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TCS_PROG_DATA,
   },
   .emit = genX(upload_tcs_push_constants),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_cs_state)(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(
      brw, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t), 64,
      &offset);

   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);

   brw_batch_emit(brw, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         uint32_t bo_offset;

         if (GEN_GEN >= 8) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            bo_offset = ffs(stage_state->per_thread_scratch) - 11;
         } else if (GEN_IS_HASWELL) {
            /* Haswell's Per Thread Scratch Space is in the range [0, 10]
             * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
             */
            bo_offset = ffs(stage_state->per_thread_scratch) - 12;
         } else {
            /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
             * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
             */
            bo_offset = stage_state->per_thread_scratch / 1024 - 1;
         }
         vfe.ScratchSpaceBasePointer =
            render_bo(stage_state->scratch_bo, bo_offset);
      }
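
      /* Worked examples of the encodings above (illustrative): for
       * per_thread_scratch = 4096, Broadwell programs ffs(4096) - 11 = 2
       * ("2 = 4k"), Haswell programs ffs(4096) - 12 = 1 ("1 = 4k"), and
       * earlier platforms program 4096 / 1024 - 1 = 3 (linear 1kB steps).
       * The encoded value rides in the low bits of the scratch base
       * pointer, which is why it is passed as the render_bo() offset.
       */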
      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      vfe.MaximumNumberofThreads = devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN >= 8 ? 2 : 0;
      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;
#if GEN_GEN < 9
      vfe.BypassGatewayControl = BypassingOpenGatewayCloseGatewayprotocol;
#endif

      /* We are uploading duplicated copies of push constant uniforms for
       * each thread.  Although the local id data needs to vary per thread,
       * it won't change for other uniform data.  Unfortunately this
       * duplication is required for gen7.  As of Haswell, this duplication
       * can be avoided, but this older mechanism with duplicated data
       * continues to work.
       *
       * FINISHME: As of Haswell, we could make use of the
       * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length"
       * field to only store one copy of uniform data.
       *
       * FINISHME: Broadwell adds a new alternative "Indirect Payload
       * Storage" which is described in the GPGPU_WALKER command and in the
       * Broadwell PRM Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline
       * => Mode of Operations => GPGPU Mode => Indirect Payload Storage.
       *
       * Note: The constant data is built in brw_upload_cs_push_constants
       * below.
       */
      vfe.URBEntryAllocationSize = GEN_GEN >= 8 ? 2 : 0;

      const uint32_t vfe_curbe_allocation =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
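
      /* Example of the allocation math (illustrative): with 4 per-thread
       * registers, 8 threads and 2 cross-thread registers this is
       * ALIGN(4 * 8 + 2, 2) = 34 register-size units -- one copy of the
       * per-thread data for every thread plus a single shared cross-thread
       * block.
       */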
3816 if (cs_prog_data
->push
.total
.size
> 0) {
3817 brw_batch_emit(brw
, GENX(MEDIA_CURBE_LOAD
), curbe
) {
3818 curbe
.CURBETotalDataLength
=
3819 ALIGN(cs_prog_data
->push
.total
.size
, 64);
3820 curbe
.CURBEDataStartAddress
= stage_state
->push_const_offset
;

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);

   const struct GENX(INTERFACE_DESCRIPTOR_DATA) idd = {
      .KernelStartPointer = brw->cs.base.prog_offset,
      .SamplerStatePointer = stage_state->sampler_offset,
      .SamplerCount = DIV_ROUND_UP(stage_state->sampler_count, 4) >> 2,
      .BindingTablePointer = stage_state->bind_bo_offset,
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
      .SharedLocalMemorySize = encode_slm_size(devinfo->gen,
                                               prog_data->total_shared),
      .BarrierEnable = cs_prog_data->uses_barrier,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif
   };

   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(brw, desc, &idd);

   brw_batch_emit(brw, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress = offset;
   }
}

static const struct brw_tracked_state genX(cs_state) = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_SURFACES,
   },
   .emit = genX(upload_cs_state)
};

/* ---------------------------------------------------------------------- */

static void
genX(upload_raster)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);

   /* _NEW_POLYGON */
   struct gl_polygon_attrib *polygon = &ctx->Polygon;

   /* _NEW_POINT */
   struct gl_point_attrib *point = &ctx->Point;

   brw_batch_emit(brw, GENX(3DSTATE_RASTER), raster) {
      if (brw->polygon_front_bit == render_to_fbo)
         raster.FrontWinding = CounterClockwise;

      if (polygon->CullFlag) {
         switch (polygon->CullFaceMode) {
         case GL_FRONT:
            raster.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            raster.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            raster.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         raster.CullMode = CULLMODE_NONE;
      }

      raster.SmoothPointEnable = point->SmoothFlag;

      raster.DXMultisampleRasterizationEnable =
         _mesa_is_multisample_enabled(ctx);

      raster.GlobalDepthOffsetEnableSolid = polygon->OffsetFill;
      raster.GlobalDepthOffsetEnableWireframe = polygon->OffsetLine;
      raster.GlobalDepthOffsetEnablePoint = polygon->OffsetPoint;

      switch (polygon->FrontMode) {
      case GL_FILL:
         raster.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (polygon->BackMode) {
      case GL_FILL:
         raster.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      /* _NEW_LINE */
      raster.AntialiasingEnable = ctx->Line.SmoothFlag;

      /* _NEW_SCISSOR */
      raster.ScissorRectangleEnable = ctx->Scissor.EnableFlags;

      /* _NEW_TRANSFORM */
      if (!ctx->Transform.DepthClamp) {
#if GEN_GEN >= 9
         raster.ViewportZFarClipTestEnable = true;
         raster.ViewportZNearClipTestEnable = true;
#else
         raster.ViewportZClipTestEnable = true;
#endif
      }

      /* BRW_NEW_CONSERVATIVE_RASTERIZATION */
#if GEN_GEN >= 9
      raster.ConservativeRasterizationEnable =
         ctx->IntelConservativeRasterization;
#endif

      raster.GlobalDepthOffsetClamp = polygon->OffsetClamp;
      raster.GlobalDepthOffsetScale = polygon->OffsetFactor;

      raster.GlobalDepthOffsetConstant = polygon->OffsetUnits * 2;
   }
}

static const struct brw_tracked_state genX(raster_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_LINE |
              _NEW_MULTISAMPLE |
              _NEW_POINT |
              _NEW_POLYGON |
              _NEW_SCISSOR |
              _NEW_TRANSFORM,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_raster),
};

/* ---------------------------------------------------------------------- */

static void
genX(upload_ps_extra)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;

   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_PS_EXTRA), psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = prog_data->uses_kill;
      psx.AttributeEnable = prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = prog_data->persample_dispatch;

      /* _NEW_MULTISAMPLE | BRW_NEW_CONSERVATIVE_RASTERIZATION */
      if (prog_data->uses_sample_mask) {
#if GEN_GEN >= 9
         if (prog_data->post_depth_coverage)
            psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
         else if (prog_data->inner_coverage &&
                  ctx->IntelConservativeRasterization)
            psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
         else
            psx.InputCoverageMaskState = ICMS_NORMAL;
#else
         psx.PixelShaderUsesInputCoverageMask = true;
#endif
      }

      psx.oMaskPresenttoRenderTarget = prog_data->uses_omask;

#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = prog_data->computed_stencil;
#endif

      /* The stricter cross-primitive coherency guarantees that the hardware
       * gives us with the "Accesses UAV" bit set for at least one shader stage
       * and the "UAV coherency required" bit set on the 3DPRIMITIVE command
       * are redundant within the current image, atomic counter and SSBO GL
       * APIs, which all have very loose ordering and coherency requirements
       * and generally rely on the application to insert explicit barriers when
       * a shader invocation is expected to see the memory writes performed by
       * the invocations of some previous primitive. Regardless of the value
       * of "UAV coherency required", the "Accesses UAV" bits will implicitly
       * cause a DC flush (useless in most cases) when the lowermost stage with
       * the bit set finishes execution.
       *
       * It would be nice to disable it, but in some cases we can't because on
       * Gen8+ it also has an influence on rasterization via the PS UAV-only
       * signal (which could be set independently from the coherency mechanism
       * in the 3DSTATE_WM command on Gen7), and because in some cases it will
       * determine whether the hardware skips execution of the fragment shader
       * or not via the ThreadDispatchEnable signal. However if we know that
       * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
       * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not set it shouldn't make any
       * difference so we may just disable it here.
       *
       * Gen8 hardware tries to compute ThreadDispatchEnable for us but doesn't
       * take into account KillPixels when no depth or stencil writes are
       * enabled. In order for occlusion queries to work correctly with no
       * attachments, we need to force-enable here.
       *
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS |
       * _NEW_COLOR
       */
      if ((prog_data->has_side_effects || prog_data->uses_kill) &&
          !brw_color_buffer_write_enabled(brw))
         psx.PixelShaderHasUAV = true;
   }
}

const struct brw_tracked_state genX(ps_extra) = {
   .dirty = {
      .mesa = _NEW_BUFFERS | _NEW_COLOR,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_ps_extra),
};

/* ---------------------------------------------------------------------- */

static void
genX(upload_ps_blend)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   const bool buffer0_is_integer = ctx->DrawBuffer->_IntegerBuffers & 0x1;

   /* _NEW_COLOR */
   struct gl_colorbuffer_attrib *color = &ctx->Color;

   brw_batch_emit(brw, GENX(3DSTATE_PS_BLEND), pb) {
      /* BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS | _NEW_COLOR */
      pb.HasWriteableRT = brw_color_buffer_write_enabled(brw);

      bool alpha_to_one = false;

      if (!buffer0_is_integer) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            pb.AlphaToCoverageEnable = ctx->Multisample.SampleAlphaToCoverage;
            alpha_to_one = ctx->Multisample.SampleAlphaToOne;
         }

         pb.AlphaTestEnable = color->AlphaEnabled;
      }

      /* Used for implementing the following bit of GL_EXT_texture_integer:
       * "Per-fragment operations that require floating-point color
       *  components, including multisample alpha operations, alpha test,
       *  blending, and dithering, have no effect when the corresponding
       *  colors are written to an integer color buffer."
       *
       * The OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       *  integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       *  operations are skipped."
       */
      if (rb && !buffer0_is_integer && (color->BlendEnabled & 1)) {
         GLenum eqRGB = color->Blend[0].EquationRGB;
         GLenum eqA = color->Blend[0].EquationA;
         GLenum srcRGB = color->Blend[0].SrcRGB;
         GLenum dstRGB = color->Blend[0].DstRGB;
         GLenum srcA = color->Blend[0].SrcA;
         GLenum dstA = color->Blend[0].DstA;
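
         /* GL specifies that the source and destination factors are ignored
          * when the blend equation is MIN or MAX, so force them to ONE below
          * and keep the factor hardware out of the result.
          */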
         if (eqRGB == GL_MIN || eqRGB == GL_MAX)
            srcRGB = dstRGB = GL_ONE;

         if (eqA == GL_MIN || eqA == GL_MAX)
            srcA = dstA = GL_ONE;

         /* Due to hardware limitations, the destination may have information
          * in an alpha channel even when the format specifies no alpha
          * channel. In order to avoid getting any incorrect blending due to
          * that alpha channel, coerce the blend factors to values that will
          * not read the alpha channel, but will instead use the correct
          * implicit value for alpha.
          */
         if (!_mesa_base_format_has_channel(rb->_BaseFormat,
                                            GL_TEXTURE_ALPHA_TYPE)) {
            srcRGB = brw_fix_xRGB_alpha(srcRGB);
            srcA = brw_fix_xRGB_alpha(srcA);
            dstRGB = brw_fix_xRGB_alpha(dstRGB);
            dstA = brw_fix_xRGB_alpha(dstA);
         }

         /* Alpha to One doesn't work with Dual Color Blending. Override
          * SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO.
          */
         if (alpha_to_one && color->Blend[0]._UsesDualSrc) {
            srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
            srcA = fix_dual_blend_alpha_to_one(srcA);
            dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
            dstA = fix_dual_blend_alpha_to_one(dstA);
         }

         pb.ColorBufferBlendEnable = true;
         pb.SourceAlphaBlendFactor = brw_translate_blend_factor(srcA);
         pb.DestinationAlphaBlendFactor = brw_translate_blend_factor(dstA);
         pb.SourceBlendFactor = brw_translate_blend_factor(srcRGB);
         pb.DestinationBlendFactor = brw_translate_blend_factor(dstRGB);

         pb.IndependentAlphaBlendEnable =
            srcA != srcRGB || dstA != dstRGB || eqA != eqRGB;
      }
   }
}

static const struct brw_tracked_state genX(ps_blend) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_ps_blend)
};

/* ---------------------------------------------------------------------- */

static void
genX(emit_vf_topology)(struct brw_context *brw)
{
   brw_batch_emit(brw, GENX(3DSTATE_VF_TOPOLOGY), vftopo) {
      vftopo.PrimitiveTopologyType = brw->primitive;
   }
}

static const struct brw_tracked_state genX(vf_topology) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_PRIMITIVE,
   },
   .emit = genX(emit_vf_topology),
};

/* ---------------------------------------------------------------------- */

void
genX(emit_mi_report_perf_count)(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t offset_in_bytes,
                                uint32_t report_id)
{
   brw_batch_emit(brw, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = instruction_bo(bo, offset_in_bytes);
      mi_rpc.ReportID = report_id;
   }
}

/* ---------------------------------------------------------------------- */

/**
 * Emit a 3DSTATE_SAMPLER_STATE_POINTERS_{VS,HS,GS,DS,PS} packet.
 */
static void
genX(emit_sampler_state_pointers_xs)(struct brw_context *brw,
                                     struct brw_stage_state *stage_state)
{
   static const uint16_t packet_headers[] = {
      [MESA_SHADER_VERTEX] = 43,
      [MESA_SHADER_TESS_CTRL] = 44,
      [MESA_SHADER_TESS_EVAL] = 45,
      [MESA_SHADER_GEOMETRY] = 46,
      [MESA_SHADER_FRAGMENT] = 47,
   };

   /* Ivybridge requires a workaround flush before VS packets. */
   if (GEN_GEN == 7 && !GEN_IS_HASWELL &&
       stage_state->stage == MESA_SHADER_VERTEX) {
      gen7_emit_vs_workaround_flush(brw);
   }
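
   /* All five 3DSTATE_SAMPLER_STATE_POINTERS_* packets share one layout,
    * so emit the VS variant and patch in the sub-opcode for the stage
    * actually being programmed.
    */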
   brw_batch_emit(brw, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
      ptr._3DCommandSubOpcode = packet_headers[stage_state->stage];
      ptr.PointertoVSSamplerState = stage_state->sampler_offset;
   }
}

static bool
has_component(mesa_format format, int i)
{
   if (_mesa_is_format_color_format(format))
      return _mesa_format_has_color_component(format, i);

   /* Depth and stencil formats have only one component. */
   return i == 0;
}

/**
 * Upload SAMPLER_BORDER_COLOR_STATE.
 */
static void
upload_default_color(struct brw_context *brw,
                     const struct gl_sampler_object *sampler,
                     mesa_format format, GLenum base_format,
                     bool is_integer_format, bool is_stencil_sampling,
                     uint32_t *sdc_offset)
{
   union gl_color_union color;

   switch (base_format) {
   case GL_DEPTH_COMPONENT:
      /* GL specs that border color for depth textures is taken from the
       * R channel, while the hardware uses A. Spam R into all the
       * channels for safety.
       */
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_ALPHA:
      color.ui[0] = 0;
      color.ui[1] = 0;
      color.ui[2] = 0;
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   case GL_INTENSITY:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_LUMINANCE:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = float_as_int(1.0);
      break;
   case GL_LUMINANCE_ALPHA:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   default:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[1];
      color.ui[2] = sampler->BorderColor.ui[2];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   }

   /* In some cases we use an RGBA surface format for GL RGB textures,
    * where we've initialized the A channel to 1.0. We also have to set
    * the border color alpha to 1.0 in that case.
    */
   if (base_format == GL_RGB)
      color.ui[3] = float_as_int(1.0);

   if (brw->gen >= 8) {
      /* On Broadwell, the border color is represented as four 32-bit floats,
       * integers, or unsigned values, interpreted according to the surface
       * format. This matches the sampler->BorderColor union exactly; just
       * memcpy the values.
       */
      uint32_t *sdc = brw_state_batch(brw, 4 * 4, 64, sdc_offset);
      memcpy(sdc, color.ui, 4 * 4);
   } else if (brw->is_haswell && (is_integer_format || is_stencil_sampling)) {
      /* Haswell's integer border color support is completely insane:
       * SAMPLER_BORDER_COLOR_STATE is 20 DWords. The first four are
       * for float colors. The next 12 DWords are MBZ and only exist to
       * pad it out to a 64 byte cacheline boundary. DWords 16-19 then
       * contain integer colors; these are only used if SURFACE_STATE
       * has the "Integer Surface Format" bit set. Even then, the
       * arrangement of the RGBA data devolves into madness.
       */
      uint32_t *sdc = brw_state_batch(brw, 20 * 4, 512, sdc_offset);
      memset(sdc, 0, 20 * 4);
      sdc += 16;

      bool stencil = format == MESA_FORMAT_S_UINT8 || is_stencil_sampling;
      const int bits_per_channel =
         _mesa_get_format_bits(format, stencil ? GL_STENCIL_BITS : GL_RED_BITS);

      /* From the Haswell PRM, "Command Reference: Structures", Page 36:
       * "If any color channel is missing from the surface format,
       *  corresponding border color should be programmed as zero and if
       *  alpha channel is missing, corresponding Alpha border color should
       *  be programmed as 1."
       */
      unsigned c[4] = { 0, 0, 0, 1 };
      for (int i = 0; i < 4; i++) {
         if (has_component(format, i))
            c[i] = color.ui[i];
      }

      switch (bits_per_channel) {
      case 8:
         /* Copy RGBA in order. */
         for (int i = 0; i < 4; i++)
            ((uint8_t *) sdc)[i] = c[i];
         break;
      case 10:
         /* R10G10B10A2_UINT is treated like a 16-bit format. */
      case 16:
         ((uint16_t *) sdc)[0] = c[0]; /* R -> DWord 0, bits 15:0  */
         ((uint16_t *) sdc)[1] = c[1]; /* G -> DWord 0, bits 31:16 */
         /* DWord 1 is Reserved/MBZ! */
         ((uint16_t *) sdc)[4] = c[2]; /* B -> DWord 2, bits 15:0  */
         ((uint16_t *) sdc)[5] = c[3]; /* A -> DWord 2, bits 31:16 */
         break;
      case 32:
         if (base_format == GL_RG) {
            /* Careful inspection of the tables reveals that for RG32
             * formats, the green channel needs to go where blue normally
             * belongs.
             */
            sdc[0] = c[0];
            sdc[2] = c[1];
            sdc[3] = 1;
         } else {
            /* Copy RGBA in order. */
            for (int i = 0; i < 4; i++)
               sdc[i] = c[i];
         }
         break;
      default:
         assert(!"Invalid number of bits per channel in integer format.");
         break;
      }
   } else if (brw->gen == 5 || brw->gen == 6) {
      struct gen5_sampler_default_color *sdc;

      sdc = brw_state_batch(brw, sizeof(*sdc), 32, sdc_offset);

      memset(sdc, 0, sizeof(*sdc));

      UNCLAMPED_FLOAT_TO_UBYTE(sdc->ub[0], color.f[0]);
      UNCLAMPED_FLOAT_TO_UBYTE(sdc->ub[1], color.f[1]);
      UNCLAMPED_FLOAT_TO_UBYTE(sdc->ub[2], color.f[2]);
      UNCLAMPED_FLOAT_TO_UBYTE(sdc->ub[3], color.f[3]);

      UNCLAMPED_FLOAT_TO_USHORT(sdc->us[0], color.f[0]);
      UNCLAMPED_FLOAT_TO_USHORT(sdc->us[1], color.f[1]);
      UNCLAMPED_FLOAT_TO_USHORT(sdc->us[2], color.f[2]);
      UNCLAMPED_FLOAT_TO_USHORT(sdc->us[3], color.f[3]);

      UNCLAMPED_FLOAT_TO_SHORT(sdc->s[0], color.f[0]);
      UNCLAMPED_FLOAT_TO_SHORT(sdc->s[1], color.f[1]);
      UNCLAMPED_FLOAT_TO_SHORT(sdc->s[2], color.f[2]);
      UNCLAMPED_FLOAT_TO_SHORT(sdc->s[3], color.f[3]);

      sdc->hf[0] = _mesa_float_to_half(color.f[0]);
      sdc->hf[1] = _mesa_float_to_half(color.f[1]);
      sdc->hf[2] = _mesa_float_to_half(color.f[2]);
      sdc->hf[3] = _mesa_float_to_half(color.f[3]);

      sdc->b[0] = sdc->s[0] >> 8;
      sdc->b[1] = sdc->s[1] >> 8;
      sdc->b[2] = sdc->s[2] >> 8;
      sdc->b[3] = sdc->s[3] >> 8;

      sdc->f[0] = color.f[0];
      sdc->f[1] = color.f[1];
      sdc->f[2] = color.f[2];
      sdc->f[3] = color.f[3];
   } else {
      float *sdc = brw_state_batch(brw, 4 * 4, 32, sdc_offset);
      memcpy(sdc, color.f, 4 * 4);
   }
}

static uint32_t
translate_wrap_mode(struct brw_context *brw, GLenum wrap, bool using_nearest)
{
   switch (wrap) {
   case GL_REPEAT:
      return TCM_WRAP;
   case GL_CLAMP:
      /* GL_CLAMP is the weird mode where coordinates are clamped to
       * [0.0, 1.0], so linear filtering of coordinates outside of
       * [0.0, 1.0] gives you half edge texel value and half border
       * color.
       *
       * Gen8+ supports this natively.
       */
      if (GEN_GEN >= 8)
         return TCM_HALF_BORDER;

      /* On Gen4-7.5, we clamp the coordinates in the fragment shader
       * and set clamp_border here, which gets the result desired.
       * We just use clamp(_to_edge) for nearest, because for nearest
       * clamping to 1.0 gives border color instead of the desired
       * clamp-to-edge color.
       */
      if (using_nearest)
         return TCM_CLAMP;

      return TCM_CLAMP_BORDER;
   case GL_CLAMP_TO_EDGE:
      return TCM_CLAMP;
   case GL_CLAMP_TO_BORDER:
      return TCM_CLAMP_BORDER;
   case GL_MIRRORED_REPEAT:
      return TCM_MIRROR;
   case GL_MIRROR_CLAMP_TO_EDGE:
      return TCM_MIRROR_ONCE;
   default:
      return TCM_WRAP;
   }
}

/**
 * Return true if the given wrap mode requires the border color to exist.
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
#if GEN_GEN >= 8
   return wrap_mode == TCM_CLAMP_BORDER ||
          wrap_mode == TCM_HALF_BORDER;
#else
   return wrap_mode == TCM_CLAMP_BORDER;
#endif
}

/**
 * Sets the sampler state for a single unit based off of the sampler key
 * entry.
 */
static void
genX(update_sampler_state)(struct brw_context *brw,
                           GLenum target, bool tex_cube_map_seamless,
                           GLfloat tex_unit_lod_bias,
                           mesa_format format, GLenum base_format,
                           const struct gl_texture_object *texObj,
                           const struct gl_sampler_object *sampler,
                           uint32_t *sampler_state,
                           uint32_t batch_offset_for_sampler_state)
{
   struct GENX(SAMPLER_STATE) samp_st = { 0 };

   /* Select min and mip filters. */
   switch (sampler->MinFilter) {
   case GL_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_NEAREST_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_LINEAR_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_NEAREST_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   case GL_LINEAR_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   default:
      unreachable("not reached");
   }

   /* Select mag filter. */
   samp_st.MagModeFilter = sampler->MagFilter == GL_LINEAR ?
      MAPFILTER_LINEAR : MAPFILTER_NEAREST;

   /* Enable anisotropic filtering if desired. */
   samp_st.MaximumAnisotropy = RATIO21;

   if (sampler->MaxAnisotropy > 1.0f) {
      if (samp_st.MinModeFilter == MAPFILTER_LINEAR)
         samp_st.MinModeFilter = MAPFILTER_ANISOTROPIC;
      if (samp_st.MagModeFilter == MAPFILTER_LINEAR)
         samp_st.MagModeFilter = MAPFILTER_ANISOTROPIC;

      if (sampler->MaxAnisotropy > 2.0f) {
         samp_st.MaximumAnisotropy =
            MIN2((sampler->MaxAnisotropy - 2) / 2, RATIO161);
      }
   }
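
   /* The ratio encoding appears to be 2 * (value + 1) : 1, i.e. RATIO21
    * (2:1) is 0 and RATIO161 (16:1) is 7, so a MaxAnisotropy of 16 maps
    * to MIN2((16 - 2) / 2, RATIO161) == 7.
    */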

   /* Set address rounding bits if not using nearest filtering. */
   if (samp_st.MinModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMinFilterRoundingEnable = true;
      samp_st.VAddressMinFilterRoundingEnable = true;
      samp_st.RAddressMinFilterRoundingEnable = true;
   }

   if (samp_st.MagModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMagFilterRoundingEnable = true;
      samp_st.VAddressMagFilterRoundingEnable = true;
      samp_st.RAddressMagFilterRoundingEnable = true;
   }

   bool either_nearest =
      sampler->MinFilter == GL_NEAREST || sampler->MagFilter == GL_NEAREST;
   unsigned wrap_s = translate_wrap_mode(brw, sampler->WrapS, either_nearest);
   unsigned wrap_t = translate_wrap_mode(brw, sampler->WrapT, either_nearest);
   unsigned wrap_r = translate_wrap_mode(brw, sampler->WrapR, either_nearest);

   if (target == GL_TEXTURE_CUBE_MAP ||
       target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      /* Cube maps must use the same wrap mode for all three coordinate
       * dimensions. Prior to Haswell, only CUBE and CLAMP are valid.
       *
       * Ivybridge and Baytrail seem to have problems with CUBE mode and
       * integer formats. Fall back to CLAMP for now.
       */
      if ((tex_cube_map_seamless || sampler->CubeMapSeamless) &&
          !(GEN_GEN == 7 && !GEN_IS_HASWELL && texObj->_IsIntegerFormat)) {
         wrap_s = TCM_CUBE;
         wrap_t = TCM_CUBE;
         wrap_r = TCM_CUBE;
      } else {
         wrap_s = TCM_CLAMP;
         wrap_t = TCM_CLAMP;
         wrap_r = TCM_CLAMP;
      }
   } else if (target == GL_TEXTURE_1D) {
      /* There's a bug in 1D texture sampling - it actually pays
       * attention to the wrap_t value, though it should not.
       * Override the wrap_t value here to GL_REPEAT to keep
       * any nonexistent border pixels from floating in.
       */
      wrap_t = TCM_WRAP;
   }

   samp_st.TCXAddressControlMode = wrap_s;
   samp_st.TCYAddressControlMode = wrap_t;
   samp_st.TCZAddressControlMode = wrap_r;

   samp_st.ShadowFunction =
      sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB ?
      intel_translate_shadow_compare_func(sampler->CompareFunc) : 0;

#if GEN_GEN >= 7
   /* Set the anisotropic filtering algorithm. */
   samp_st.AnisotropicAlgorithm =
      samp_st.MinModeFilter == MAPFILTER_ANISOTROPIC ?
      EWAApproximation : LEGACY;
#endif

   samp_st.NonnormalizedCoordinateEnable = target == GL_TEXTURE_RECTANGLE;

   const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
   samp_st.MinLOD = CLAMP(sampler->MinLod, 0, hw_max_lod);
   samp_st.MaxLOD = CLAMP(sampler->MaxLod, 0, hw_max_lod);
   samp_st.TextureLODBias =
      CLAMP(tex_unit_lod_bias + sampler->LodBias, -16, 15);

#if GEN_GEN == 6
   samp_st.BaseMipLevel =
      CLAMP(texObj->MinLevel + texObj->BaseLevel, 0, hw_max_lod);
   samp_st.MinandMagStateNotEqual =
      samp_st.MinModeFilter != samp_st.MagModeFilter;
#endif

   /* Upload the border color if necessary. If not, just point it at
    * offset 0 (the start of the batch) - the color should be ignored,
    * but that address won't fault in case something reads it anyway.
    */
   uint32_t border_color_offset = 0;
   if (wrap_mode_needs_border_color(wrap_s) ||
       wrap_mode_needs_border_color(wrap_t) ||
       wrap_mode_needs_border_color(wrap_r)) {
      upload_default_color(brw, sampler, format, base_format,
                           texObj->_IsIntegerFormat, texObj->StencilSampling,
                           &border_color_offset);
   }

   samp_st.BorderColorPointer = border_color_offset;

   if (GEN_GEN < 6) {
      samp_st.BorderColorPointer += brw->batch.bo->offset64; /* reloc */
      brw_emit_reloc(&brw->batch, batch_offset_for_sampler_state + 8,
                     brw->batch.bo, border_color_offset,
                     I915_GEM_DOMAIN_SAMPLER, 0);
   }
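
   /* The pointer packed above is only a presumed address: the relocation
    * asks the kernel to rewrite the border color pointer DWord (8 bytes
    * into this SAMPLER_STATE entry) if the batch ends up elsewhere.
    */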

#if GEN_GEN >= 8
   samp_st.LODPreClampMode = CLAMP_MODE_OGL;
#else
   samp_st.LODPreClampEnable = true;
#endif

   GENX(SAMPLER_STATE_pack)(brw, sampler_state, &samp_st);
}

static void
update_sampler_state(struct brw_context *brw,
                     int unit,
                     uint32_t *sampler_state,
                     uint32_t batch_offset_for_sampler_state)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   const struct gl_texture_object *texObj = texUnit->_Current;
   const struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   /* These don't use samplers at all. */
   if (texObj->Target == GL_TEXTURE_BUFFER)
      return;

   struct gl_texture_image *firstImage = texObj->Image[0][texObj->BaseLevel];
   genX(update_sampler_state)(brw, texObj->Target,
                              ctx->Texture.CubeMapSeamless,
                              texUnit->LodBias,
                              firstImage->TexFormat, firstImage->_BaseFormat,
                              texObj, sampler,
                              sampler_state, batch_offset_for_sampler_state);
}

static void
genX(upload_sampler_state_table)(struct brw_context *brw,
                                 struct gl_program *prog,
                                 struct brw_stage_state *stage_state)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t sampler_count = stage_state->sampler_count;

   GLbitfield SamplersUsed = prog->SamplersUsed;

   if (sampler_count == 0)
      return;

   /* SAMPLER_STATE is 4 DWords on all platforms. */
   const int dwords = GENX(SAMPLER_STATE_length);
   const int size_in_bytes = dwords * sizeof(uint32_t);

   uint32_t *sampler_state = brw_state_batch(brw,
                                             sampler_count * size_in_bytes,
                                             32, &stage_state->sampler_offset);
   /* memset(sampler_state, 0, sampler_count * size_in_bytes); */

   uint32_t batch_offset_for_sampler_state = stage_state->sampler_offset;

   for (unsigned s = 0; s < sampler_count; s++) {
      if (SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         if (ctx->Texture.Unit[unit]._Current) {
            update_sampler_state(brw, unit, sampler_state,
                                 batch_offset_for_sampler_state);
         }
      }

      sampler_state += dwords;
      batch_offset_for_sampler_state += size_in_bytes;
   }
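
   /* sampler_state walks the CPU mapping of the table while
    * batch_offset_for_sampler_state tracks the same entry's offset within
    * the batch, so per-entry relocations (the pre-Gen6 border color
    * pointer above) target the right SAMPLER_STATE slot.
    */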

   if (GEN_GEN >= 7 && stage_state->stage != MESA_SHADER_COMPUTE) {
      /* Emit a 3DSTATE_SAMPLER_STATE_POINTERS_XS packet. */
      genX(emit_sampler_state_pointers_xs)(brw, stage_state);
   } else {
      /* Flag that the sampler state table pointer has changed; later atoms
       * will handle it.
       */
      brw->ctx.NewDriverState |= BRW_NEW_SAMPLER_STATE_TABLE;
   }
}

static void
genX(upload_fs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;
   genX(upload_sampler_state_table)(brw, fs, &brw->wm.base);
}

static const struct brw_tracked_state genX(fs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_fs_samplers),
};

static void
genX(upload_vs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;
   genX(upload_sampler_state_table)(brw, vs, &brw->vs.base);
}

static const struct brw_tracked_state genX(vs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTEX_PROGRAM,
   },
   .emit = genX(upload_vs_samplers),
};

static void
genX(upload_gs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;
   if (!gs)
      return;

   genX(upload_sampler_state_table)(brw, gs, &brw->gs.base);
}

static const struct brw_tracked_state genX(gs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_GEOMETRY_PROGRAM,
   },
   .emit = genX(upload_gs_samplers),
};

static void
genX(upload_tcs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   if (!tcs)
      return;

   genX(upload_sampler_state_table)(brw, tcs, &brw->tcs.base);
}

static const struct brw_tracked_state genX(tcs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tcs_samplers),
};

static void
genX(upload_tes_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
   if (!tes)
      return;

   genX(upload_sampler_state_table)(brw, tes, &brw->tes.base);
}

static const struct brw_tracked_state genX(tes_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tes_samplers),
};

static void
genX(upload_cs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;
   if (!cs)
      return;

   genX(upload_sampler_state_table)(brw, cs, &brw->cs.base);
}

const struct brw_tracked_state genX(cs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = genX(upload_cs_samplers),
};

/* ---------------------------------------------------------------------- */

void
genX(init_atoms)(struct brw_context *brw)
{
#if GEN_GEN < 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Once all the programs are done, we know how large urb entry
       * sizes need to be and can decide if we need to change the urb
       * layout.
       */
      &brw_recalculate_urb_fence,

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_wm_pull_constants,
      &brw_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_wm_binding_table,

      /* These set up state for brw_psp_urb_cbs */
      &genX(sf_clip_viewport),

      &genX(vs_state), /* always required, enabled or not */

      &brw_invariant_state,

      &brw_binding_table_pointers,
      &brw_blend_constant_color,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),
      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &brw_vertices,

      &brw_constant_buffer
   };
#elif GEN_GEN == 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(sf_clip_viewport),

      /* Command packets: */

      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_state */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &brw_vs_binding_table,
      &gen6_gs_binding_table,
      &brw_wm_binding_table,

      &gen6_sampler_state,
      &genX(multisample_state),

      &genX(scissor_state),

      &gen6_binding_table_pointers,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &brw_vertices,
   };
#elif GEN_GEN == 7
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Command packets: */

      &genX(sf_clip_viewport),

      &gen7_push_constant_space,

      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_vs_abo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tcs_abo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_tes_abo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_gs_abo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &brw_wm_abo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(tcs_samplers),
      &genX(tes_samplers),

      &genX(multisample_state),

      &genX(scissor_state),

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &brw_vertices,
   };
#elif GEN_GEN >= 8
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(sf_clip_viewport),

      &gen7_push_constant_space,

      &genX(color_calc_state),

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup. Must come before the VS/WM unit. The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_vs_abo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tcs_abo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_tes_abo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_gs_abo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &brw_wm_abo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(tcs_samplers),
      &genX(tes_samplers),

      &genX(multisample_state),

      &genX(raster_state),

      &genX(depth_stencil_state),

      &genX(scissor_state),

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &genX(vf_topology),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &brw_vertices,
   };
#endif

   STATIC_ASSERT(ARRAY_SIZE(render_atoms) <= ARRAY_SIZE(brw->render_atoms));
   brw_copy_pipeline_atoms(brw, BRW_RENDER_PIPELINE,
                           render_atoms, ARRAY_SIZE(render_atoms));

   static const struct brw_tracked_state *compute_atoms[] =
   {
      &brw_cs_image_surfaces,
      &gen7_cs_push_constants,
      &brw_cs_pull_constants,
      &brw_cs_ubo_surfaces,
      &brw_cs_abo_surfaces,
      &brw_cs_texture_surfaces,
      &brw_cs_work_groups_surface,
      &genX(cs_samplers),
      &genX(cs_state),
   };

   STATIC_ASSERT(ARRAY_SIZE(compute_atoms) <= ARRAY_SIZE(brw->compute_atoms));
   brw_copy_pipeline_atoms(brw, BRW_COMPUTE_PIPELINE,
                           compute_atoms, ARRAY_SIZE(compute_atoms));

   brw->vtbl.emit_mi_report_perf_count = genX(emit_mi_report_perf_count);
}