/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "dev/gen_device_info.h"
#include "common/gen_sample_positions.h"
#include "genxml/gen_macros.h"

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/state.h"

#include "brw_context.h"
#include "brw_multisample_state.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"

#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/glformats.h"
#include "main/samplerobj.h"
#include "main/shaderapi.h"
#include "main/stencil.h"
#include "main/transformfeedback.h"
#include "main/varray.h"
#include "main/viewport.h"
#include "util/half_float.h"
static void *
emit_dwords(struct brw_context *brw, unsigned n)
{
   intel_batchbuffer_begin(brw, n);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}
#define __gen_address_type struct brw_address
#define __gen_user_data struct brw_context
static uint64_t
__gen_combine_address(struct brw_context *brw, void *location,
                      struct brw_address address, uint32_t delta)
{
   struct intel_batchbuffer *batch = &brw->batch;
   uint32_t offset;

   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      if (GEN_GEN < 6 && brw_ptr_in_state_buffer(batch, location)) {
         offset = (char *) location - (char *) brw->batch.state.map;
         return brw_state_reloc(batch, offset, address.bo,
                                address.offset + delta,
                                address.reloc_flags);
      }

      assert(!brw_ptr_in_state_buffer(batch, location));

      offset = (char *) location - (char *) brw->batch.batch.map;
      return brw_batch_reloc(batch, offset, address.bo,
                             address.offset + delta,
                             address.reloc_flags);
   }
}
UNUSED static struct brw_address
rw_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE,
   };
}

static struct brw_address
ro_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
   };
}

static struct brw_address
rw_32_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE | RELOC_32BIT,
   };
}

static struct brw_address
ro_32_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_32BIT,
   };
}

UNUSED static struct brw_address
ggtt_bo(struct brw_bo *bo, uint32_t offset)
{
   return (struct brw_address) {
      .bo = bo,
      .offset = offset,
      .reloc_flags = RELOC_WRITE | RELOC_NEEDS_GGTT,
   };
}

#if GEN_GEN == 4
static struct brw_address
KSP(struct brw_context *brw, uint32_t offset)
{
   return ro_bo(brw->cache.bo, offset);
}
#else
static uint32_t
KSP(UNUSED struct brw_context *brw, uint32_t offset)
{
   return offset;
}
#endif
#include "genxml/genX_pack.h"

#define _brw_cmd_length(cmd) cmd ## _length
#define _brw_cmd_length_bias(cmd) cmd ## _length_bias
#define _brw_cmd_header(cmd) cmd ## _header
#define _brw_cmd_pack(cmd) cmd ## _pack

#define brw_batch_emit(brw, cmd, name)                  \
   for (struct cmd name = { _brw_cmd_header(cmd) },     \
        *_dst = emit_dwords(brw, _brw_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1);              \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),   \
        _dst = NULL)

#define brw_batch_emitn(brw, cmd, n, ...) ({           \
      uint32_t *_dw = emit_dwords(brw, n);             \
      struct cmd template = {                          \
         _brw_cmd_header(cmd),                         \
         .DWordLength = n - _brw_cmd_length_bias(cmd), \
         __VA_ARGS__                                   \
      };                                               \
      _brw_cmd_pack(cmd)(brw, _dw, &template);         \
      _dw + 1; /* Array starts at dw[1] */             \
   })

#define brw_state_emit(brw, cmd, align, offset, name)          \
   for (struct cmd name = {},                                  \
        *_dst = brw_state_batch(brw, _brw_cmd_length(cmd) * 4, \
                                align, offset);                \
        __builtin_expect(_dst != NULL, 1);                     \
        _brw_cmd_pack(cmd)(brw, (void *)_dst, &name),          \
        _dst = NULL)
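/* Usage sketch (mirrors real call sites later in this file): brw_batch_emit
 * declares a named template struct, runs the body once to fill it in, and
 * packs it into the batch in the for-loop's increment expression:
 *
 *    brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
 *       rect.ClippedDrawingRectangleXMax = fb_width - 1;
 *       rect.ClippedDrawingRectangleYMax = fb_height - 1;
 *    }
 *
 * brw_batch_emitn packs the header immediately and evaluates to a pointer
 * just past it, so the caller can append a variable-length payload such as
 * an array of VERTEX_BUFFER_STATE or VERTEX_ELEMENT_STATE entries.
 */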
/**
 * Polygon stipple packet
 */
static void
genX(upload_polygon_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
      /* Polygon stipple is provided in OpenGL order, i.e. bottom
       * row first.  If we're rendering to a window (i.e. the
       * default frame buffer object, 0), then we need to invert
       * it to match our pixel layout.  But if we're rendering
       * to a FBO (i.e. any named frame buffer object), we *don't*
       * need to invert - we already match the layout.
       */
      if (ctx->DrawBuffer->FlipY) {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[31 - i]; /* invert */
      } else {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[i];
      }
   }
}

static const struct brw_tracked_state genX(polygon_stipple) = {
   .dirty = {
      .mesa = _NEW_POLYGON |
              _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple),
};
/**
 * Polygon stipple offset packet
 */
static void
genX(upload_polygon_stipple_offset)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_OFFSET), poly) {
      /* _NEW_BUFFERS
       *
       * If we're drawing to a system window we have to invert the Y axis
       * in order to match the OpenGL pixel coordinate system, and our
       * offset must be matched to the window position.  If we're drawing
       * to a user-created FBO then our native pixel coordinate system
       * works just fine, and there's no window system to worry about.
       */
      if (ctx->DrawBuffer->FlipY) {
         poly.PolygonStippleYOffset =
            (32 - (_mesa_geometric_height(ctx->DrawBuffer) & 31)) & 31;
      }
   }
}
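/* Illustrative arithmetic for the offset above: a window 100 pixels tall
 * gives 100 & 31 == 4, so the programmed offset is (32 - 4) & 31 == 28,
 * which realigns the 32-row stipple pattern with the bottom of the
 * Y-flipped window.
 */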
static const struct brw_tracked_state genX(polygon_stipple_offset) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_POLYGON,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple_offset),
};
/**
 * Line stipple packet
 */
static void
genX(upload_line_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_LINE_STIPPLE), line) {
      line.LineStipplePattern = ctx->Line.StipplePattern;

      line.LineStippleInverseRepeatCount = 1.0f / ctx->Line.StippleFactor;
      line.LineStippleRepeatCount = ctx->Line.StippleFactor;
   }
}

static const struct brw_tracked_state genX(line_stipple) = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_line_stipple),
};
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void
genX(upload_drawing_rect)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   const unsigned int fb_width = _mesa_geometric_width(fb);
   const unsigned int fb_height = _mesa_geometric_height(fb);

   brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = fb_width - 1;
      rect.ClippedDrawingRectangleYMax = fb_height - 1;
   }
}

static const struct brw_tracked_state genX(drawing_rect) = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_drawing_rect),
};
static uint32_t *
genX(emit_vertex_buffer_state)(struct brw_context *brw,
                               uint32_t *dw,
                               unsigned buffer_nr,
                               struct brw_bo *bo,
                               unsigned start_offset,
                               MAYBE_UNUSED unsigned end_offset,
                               unsigned stride,
                               MAYBE_UNUSED unsigned step_rate)
{
   struct GENX(VERTEX_BUFFER_STATE) buf_state = {
      .VertexBufferIndex = buffer_nr,
      .BufferPitch = stride,

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two vertex buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict vertex buffers to the low 32 bits of
       * the address space.
       */
      .BufferStartingAddress = ro_32_bo(bo, start_offset),
#if GEN_GEN >= 8
      .BufferSize = end_offset - start_offset,
#endif

#if GEN_GEN >= 7
      .AddressModifyEnable = true,
#endif

#if GEN_GEN < 8
      .BufferAccessType = step_rate ? INSTANCEDATA : VERTEXDATA,
      .InstanceDataStepRate = step_rate,
#if GEN_GEN >= 5
      .EndAddress = ro_bo(bo, end_offset - 1),
#endif
#endif

#if GEN_GEN == 11
      .VertexBufferMOCS = ICL_MOCS_WB,
#elif GEN_GEN == 10
      .VertexBufferMOCS = CNL_MOCS_WB,
#elif GEN_GEN == 9
      .VertexBufferMOCS = SKL_MOCS_WB,
#elif GEN_GEN == 8
      .VertexBufferMOCS = BDW_MOCS_WB,
#elif GEN_GEN == 7
      .VertexBufferMOCS = GEN7_MOCS_L3,
#endif
   };

   GENX(VERTEX_BUFFER_STATE_pack)(brw, dw, &buf_state);
   return dw + GENX(VERTEX_BUFFER_STATE_length);
}
static bool
is_passthru_format(uint32_t format)
{
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return true;
   default:
      return false;
   }
}
UNUSED static int
uploads_needed(uint32_t format,
               bool is_dual_slot)
{
   if (!is_passthru_format(format))
      return 1;

   if (is_dual_slot)
      return 2;

   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
      return 1;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return 2;
   default:
      unreachable("not reached");
   }
}
/*
 * Returns the format that we are finally going to use when uploading a
 * vertex element.  It will only change if we are using *64*PASSTHRU formats,
 * as for gen < 8 they need to be split into two *32*FLOAT formats.
 *
 * @upload indicates which upload we are in.  Valid values are [0,1].
 */
static uint32_t
downsize_format_if_needed(uint32_t format,
                          int upload)
{
   assert(upload == 0 || upload == 1);

   if (!is_passthru_format(format))
      return format;

   /* ISL_FORMAT_R64_PASSTHRU and ISL_FORMAT_R64G64_PASSTHRU with an upload ==
    * 1 means that we have been forced to do 2 uploads for a size <= 2. This
    * happens with gen < 8 and dvec3 or dvec4 vertex shader input
    * variables. In those cases, we return ISL_FORMAT_R32_FLOAT as a way of
    * flagging that we want to fill with zeroes this second forced upload.
    */
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32G32_FLOAT;
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return ISL_FORMAT_R32G32B32A32_FLOAT;
   default:
      unreachable("not reached");
   }
}
/*
 * Returns the number of components associated with a format that is used on
 * a 64 to 32 format split.  See downsize_format_if_needed().
 */
static int
upload_format_size(uint32_t upload_format)
{
   switch (upload_format) {
   case ISL_FORMAT_R32_FLOAT:
      /* downsized_format has returned this one in order to flag that we are
       * performing a second upload which we want to have filled with
       * zeroes. This happens with gen < 8, a size <= 2, and dvec3 or dvec4
       * vertex shader input variables.
       */
      return 0;
   case ISL_FORMAT_R32G32_FLOAT:
      return 2;
   case ISL_FORMAT_R32G32B32A32_FLOAT:
      return 4;
   default:
      unreachable("not reached");
   }
}
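/* A worked example of the gen < 8 64-bit split handled by the helpers above:
 * a dvec3 input uses ISL_FORMAT_R64G64B64_PASSTHRU, so uploads_needed()
 * reports 2.  Upload 0 downsizes to ISL_FORMAT_R32G32B32A32_FLOAT (4
 * components), and upload 1 reads 16 bytes further in as
 * ISL_FORMAT_R32G32_FLOAT (2 components).  A plain double or dvec2 fits in a
 * single upload.
 */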
static UNUSED uint16_t
pinned_bo_high_bits(struct brw_bo *bo)
{
   return (bo->kflags & EXEC_OBJECT_PINNED) ? bo->gtt_offset >> 32ull : 0;
}
/* The VF cache designers apparently cut corners, and made the cache key's
 * <VertexBufferIndex, Memory Address> tuple only consider the bottom 32 bits
 * of the address.  If you happen to have two vertex buffers which get placed
 * exactly 4 GiB apart and use them in back-to-back draw calls, you can get
 * collisions.  (These collisions can happen within a single batch.)
 *
 * In the soft-pin world, we'd like to assign addresses up front, and never
 * move buffers.  So, we need to do a VF cache invalidate if the buffer for
 * a particular VB slot has different [48:32] address bits than the last one.
 *
 * In the relocation world, we have no idea what the addresses will be, so
 * we can't apply this workaround.  Instead, we tell the kernel to move it
 * to the low 4GB regardless.
 */
static void
vf_invalidate_for_vb_48bit_transitions(struct brw_context *brw)
{
#if GEN_GEN >= 8
   bool need_invalidate = false;
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      uint16_t high_bits = pinned_bo_high_bits(brw->vb.buffers[i].bo);

      if (high_bits != brw->vb.last_bo_high_bits[i]) {
         need_invalidate = true;
         brw->vb.last_bo_high_bits[i] = high_bits;
      }
   }

   /* Don't bother with draw parameter buffers - those are generated by
    * the driver so we can select a consistent memory zone.
    */
   if (need_invalidate) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE);
   }
#endif
}
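/* To make the tracked transition concrete: a buffer pinned at 0x100001000
 * and a later one at 0x200001000 in the same VB slot share their low 32
 * address bits, which is exactly the aliasing case described above.
 * pinned_bo_high_bits() returns 0x1 and then 0x2 for them, the comparison in
 * the loop sees the change, and a single VF cache invalidate is emitted
 * before the next draw.
 */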
static void
vf_invalidate_for_ib_48bit_transition(struct brw_context *brw)
{
#if GEN_GEN >= 8
   uint16_t high_bits = pinned_bo_high_bits(brw->ib.bo);

   if (high_bits != brw->ib.last_bo_high_bits) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE);
      brw->ib.last_bo_high_bits = high_bits;
   }
#endif
}
static void
genX(emit_vertices)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

#if GEN_GEN < 6
   brw_emit_query_begin(brw);
#endif

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

#if GEN_GEN >= 8
   struct gl_context *ctx = &brw->ctx;
   const bool uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
                                ctx->Polygon.BackMode != GL_FILL);

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      unsigned vue = brw->vb.nr_enabled;

      /* The element for the edge flags must always be last, so we have to
       * insert the SGVS before it in that case.
       */
      if (uses_edge_flag) {
         assert(vue > 0);
         vue--;
      }

      WARN_ONCE(vue >= 33,
                "Trying to insert VID/IID past 33rd vertex element, "
                "need to reorder the vertex attributes.");

      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs) {
         if (vs_prog_data->uses_vertexid) {
            vfs.VertexIDEnable = true;
            vfs.VertexIDComponentNumber = 2;
            vfs.VertexIDElementOffset = vue;
         }

         if (vs_prog_data->uses_instanceid) {
            vfs.InstanceIDEnable = true;
            vfs.InstanceIDComponentNumber = 3;
            vfs.InstanceIDElementOffset = vue;
         }
      }

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = true;
         vfi.VertexElementIndex = vue;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs);
   }
#endif
   const bool uses_draw_params =
      vs_prog_data->uses_firstvertex ||
      vs_prog_data->uses_baseinstance;

   const bool uses_derived_draw_params =
      vs_prog_data->uses_drawid ||
      vs_prog_data->uses_is_indexed_draw;

   const bool needs_sgvs_element = (uses_draw_params ||
                                    vs_prog_data->uses_instanceid ||
                                    vs_prog_data->uses_vertexid);

   unsigned nr_elements =
      brw->vb.nr_enabled + needs_sgvs_element + uses_derived_draw_params;

#if GEN_GEN < 8
   /* If any of the formats of vb.enabled needs more than one upload, we need
    * to add it to nr_elements.
    */
   for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_array_attributes *glattrib = input->glattrib;
      uint32_t format = brw_get_vertex_surface_type(brw, glattrib);

      if (uploads_needed(format, input->is_dual_slot) > 1)
         nr_elements++;
   }
#endif
   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (nr_elements == 0) {
      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                           1 + GENX(VERTEX_ELEMENT_STATE_length));
      struct GENX(VERTEX_ELEMENT_STATE) elem = {
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_1_FP,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem);
      return;
   }
   /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + uses_derived_draw_params;

   vf_invalidate_for_vb_48bit_transitions(brw);

   if (nr_buffers) {
      assert(nr_buffers <= (GEN_GEN >= 6 ? 33 : 17));

      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_BUFFERS),
                           1 + GENX(VERTEX_BUFFER_STATE_length) * nr_buffers);

      for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
         const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         /* Prior to Haswell and Bay Trail we have to use 4-component formats
          * to fake 3-component ones.  In particular, we do this for
          * half-float and 8 and 16-bit integer formats.  This means that the
          * vertex element may poke over the end of the buffer by 2 bytes.
          */
         const unsigned padding =
            (GEN_GEN <= 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail) * 2;
         const unsigned end = buffer->offset + buffer->size + padding;
         dw = genX(emit_vertex_buffer_state)(brw, dw, i, buffer->bo,
                                             buffer->offset,
                                             end,
                                             buffer->stride,
                                             buffer->step_rate);
      }

      if (uses_draw_params) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers,
                                             brw->draw.draw_params_bo,
                                             brw->draw.draw_params_offset,
                                             brw->draw.draw_params_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }

      if (uses_derived_draw_params) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers + 1,
                                             brw->draw.derived_draw_params_bo,
                                             brw->draw.derived_draw_params_offset,
                                             brw->draw.derived_draw_params_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }
   }
   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
#if GEN_GEN >= 6
   assert(nr_elements <= 34);
   const struct brw_vertex_element *gen6_edgeflag_input = NULL;
#else
   assert(nr_elements <= 18);
#endif

   dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                        1 + GENX(VERTEX_ELEMENT_STATE_length) * nr_elements);
   unsigned i;
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_array_attributes *glattrib = input->glattrib;
      uint32_t format = brw_get_vertex_surface_type(brw, glattrib);
      uint32_t comp0 = VFCOMP_STORE_SRC;
      uint32_t comp1 = VFCOMP_STORE_SRC;
      uint32_t comp2 = VFCOMP_STORE_SRC;
      uint32_t comp3 = VFCOMP_STORE_SRC;
      const unsigned num_uploads = GEN_GEN < 8 ?
         uploads_needed(format, input->is_dual_slot) : 1;

#if GEN_GEN >= 8
      /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
       *    "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
       *    element which has edge flag enabled."
       */
      assert(!(is_passthru_format(format) && uses_edge_flag));
#endif

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper.  Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte.  Just rewrite that to convert to a float.
       *
       * Gen6+ passes edgeflag as sideband along with the vertex, instead
       * of in the VUE.  We have to upload it sideband as the last vertex
       * element according to the B-Spec.
       */
#if GEN_GEN >= 6
      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         gen6_edgeflag_input = input;
         continue;
      }
#endif

      for (unsigned c = 0; c < num_uploads; c++) {
         const uint32_t upload_format = GEN_GEN >= 8 ? format :
            downsize_format_if_needed(format, c);
         /* If we need more than one upload, the offset stride would be 128
          * bits (16 bytes), as for previous uploads we are using the full
          * entry.
          */
         const unsigned offset = input->offset + c * 16;

         const struct gl_array_attributes *glattrib = input->glattrib;
         const int size = (GEN_GEN < 8 && is_passthru_format(format)) ?
            upload_format_size(upload_format) : glattrib->Size;

         switch (size) {
         case 0: comp0 = VFCOMP_STORE_0; /* fallthrough */
         case 1: comp1 = VFCOMP_STORE_0; /* fallthrough */
         case 2: comp2 = VFCOMP_STORE_0; /* fallthrough */
         case 3:
            if (GEN_GEN >= 8 && glattrib->Doubles) {
               comp3 = VFCOMP_STORE_0;
            } else if (glattrib->Integer) {
               comp3 = VFCOMP_STORE_1_INT;
            } else {
               comp3 = VFCOMP_STORE_1_FP;
            }
            break;
         }

#if GEN_GEN >= 8
         /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
          *
          *     "When SourceElementFormat is set to one of the *64*_PASSTHRU
          *     formats, 64-bit components are stored in the URB without any
          *     conversion. In this case, vertex elements must be written as 128
          *     or 256 bits, with VFCOMP_STORE_0 being used to pad the output as
          *     required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
          *     component into the URB, Component 1 must be specified as
          *     VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE) in
          *     order to output a 128-bit vertex element, or Components 1-3 must
          *     be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
          *     element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
          *     to be specified as VFCOMP_STORE_0 in order to output a 256-bit
          *     vertex element."
          */
         if (glattrib->Doubles && !input->is_dual_slot) {
            /* Store vertex elements which correspond to double and dvec2 vertex
             * shader inputs as 128-bit vertex elements, instead of 256-bits.
             */
            comp2 = VFCOMP_NOSTORE;
            comp3 = VFCOMP_NOSTORE;
         }
#endif

         struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
            .VertexBufferIndex = input->buffer,
            .Valid = true,
            .SourceElementFormat = upload_format,
            .SourceElementOffset = offset,
            .Component0Control = comp0,
            .Component1Control = comp1,
            .Component2Control = comp2,
            .Component3Control = comp3,
#if GEN_GEN < 5
            .DestinationElementOffset = i * 4,
#endif
         };

         GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
         dw += GENX(VERTEX_ELEMENT_STATE_length);
      }
   }
   if (needs_sgvs_element) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

#if GEN_GEN >= 8
      if (uses_draw_params) {
         elem_state.VertexBufferIndex = brw->vb.nr_buffers;
         elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }
#else
      elem_state.VertexBufferIndex = brw->vb.nr_buffers;
      elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
      if (uses_draw_params) {
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }

      if (vs_prog_data->uses_vertexid)
         elem_state.Component2Control = VFCOMP_STORE_VID;

      if (vs_prog_data->uses_instanceid)
         elem_state.Component3Control = VFCOMP_STORE_IID;
#endif

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
) {
860 struct GENX(VERTEX_ELEMENT_STATE
) elem_state
= {
862 .VertexBufferIndex
= brw
->vb
.nr_buffers
+ 1,
863 .SourceElementFormat
= ISL_FORMAT_R32G32_UINT
,
864 .Component0Control
= VFCOMP_STORE_SRC
,
865 .Component1Control
= VFCOMP_STORE_SRC
,
866 .Component2Control
= VFCOMP_STORE_0
,
867 .Component3Control
= VFCOMP_STORE_0
,
869 .DestinationElementOffset
= i
* 4,
873 GENX(VERTEX_ELEMENT_STATE_pack
)(brw
, dw
, &elem_state
);
874 dw
+= GENX(VERTEX_ELEMENT_STATE_length
);
#if GEN_GEN >= 6
   if (gen6_edgeflag_input) {
      const struct gl_array_attributes *glattrib = gen6_edgeflag_input->glattrib;
      const uint32_t format = brw_get_vertex_surface_type(brw, glattrib);

      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = gen6_edgeflag_input->buffer,
         .EdgeFlagEnable = true,
         .SourceElementFormat = format,
         .SourceElementOffset = gen6_edgeflag_input->offset,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#endif
#if GEN_GEN >= 8
   for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
      unsigned element_index;

      /* The edge flag element is reordered to be the last one in the code
       * above so we need to compensate for that in the element indices used
       * below.
       */
      if (input == gen6_edgeflag_input)
         element_index = nr_elements - 1;
      else
         element_index = j++;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element_index;
         vfi.InstancingEnable = buffer->step_rate != 0;
         vfi.InstanceDataStepRate = buffer->step_rate;
      }
   }

   if (vs_prog_data->uses_drawid) {
      const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element;
      }
   }
#endif
}

static const struct brw_tracked_state genX(vertices) = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(emit_vertices),
};
static void
genX(emit_index_buffer)(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   vf_invalidate_for_ib_48bit_transition(brw);

   brw_batch_emit(brw, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if GEN_GEN < 8 && !GEN_IS_HASWELL
      ib.CutIndexEnable = brw->prim_restart.enable_cut_index;
#endif
      ib.IndexFormat = brw_get_index_type(index_buffer->index_size);

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two index buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict index buffers to the low 32 bits of
       * the address space.
       */
      ib.BufferStartingAddress = ro_32_bo(brw->ib.bo, 0);
#if GEN_GEN >= 8
      ib.IndexBufferMOCS = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      ib.BufferSize = brw->ib.size;
#else
      ib.BufferEndingAddress = ro_bo(brw->ib.bo, brw->ib.size - 1);
#endif
   }
}

static const struct brw_tracked_state genX(index_buffer) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(emit_index_buffer),
};
#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
genX(upload_cut_index)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_VF), vf) {
      if (ctx->Array._PrimitiveRestart && brw->ib.ib) {
         vf.IndexedDrawCutIndexEnable = true;
         vf.CutIndex = _mesa_primitive_restart_index(ctx, brw->ib.index_size);
      }
   }
}

const struct brw_tracked_state genX(cut_index) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM,
      .brw   = BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(upload_cut_index),
};
#endif
#if GEN_GEN >= 6
/**
 * Determine the appropriate attribute override value to store into the
 * 3DSTATE_SF structure for a given fragment shader attribute.  The attribute
 * override value contains two pieces of information: the location of the
 * attribute in the VUE (relative to urb_entry_read_offset, see below), and a
 * flag indicating whether to "swizzle" the attribute based on the direction
 * the triangle is facing.
 *
 * If an attribute is "swizzled", then the given VUE location is used for
 * front-facing triangles, and the VUE location that immediately follows is
 * used for back-facing triangles.  We use this to implement the mapping from
 * gl_FrontColor/gl_BackColor to gl_Color.
 *
 * urb_entry_read_offset is the offset into the VUE at which the SF unit is
 * being instructed to begin reading attribute data.  It can be set to a
 * nonzero value to prevent the SF unit from wasting time reading elements of
 * the VUE that are not needed by the fragment shader.  It is measured in
 * 256-bit increments.
 */
static void
genX(get_attr_override)(struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr,
                        const struct brw_vue_map *vue_map,
                        int urb_entry_read_offset, int fs_attr,
                        bool two_side_color, uint32_t *max_source_attr)
{
   /* Find the VUE slot for this attribute. */
   int slot = vue_map->varying_to_slot[fs_attr];

   /* Viewport and Layer are stored in the VUE header.  We need to override
    * them to zero if earlier stages didn't write them, as GL requires that
    * they read back as zero when not explicitly set.
    */
   if (fs_attr == VARYING_SLOT_VIEWPORT || fs_attr == VARYING_SLOT_LAYER) {
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideW = true;
      attr->ConstantSource = CONST_0000;

      if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
         attr->ComponentOverrideY = true;
      if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
         attr->ComponentOverrideZ = true;

      return;
   }

   /* If there was only a back color written but not front, use back
    * as the color instead of undefined.
    */
   if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
   if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

   if (slot == -1) {
      /* This attribute does not exist in the VUE--that means that the vertex
       * shader did not write to it.  This means that either:
       *
       * (a) This attribute is a texture coordinate, and it is going to be
       * replaced with point coordinates (as a consequence of a call to
       * glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)), so the
       * hardware will ignore whatever attribute override we supply.
       *
       * (b) This attribute is read by the fragment shader but not written by
       * the vertex shader, so its value is undefined.  Therefore the
       * attribute override we supply doesn't matter.
       *
       * (c) This attribute is gl_PrimitiveID, and it wasn't written by the
       * previous shader stage.
       *
       * Note that we don't have to worry about the cases where the attribute
       * is gl_PointCoord or is undergoing point sprite coordinate
       * replacement, because in those cases, this function isn't called.
       *
       * In case (c), we need to program the attribute overrides so that the
       * primitive ID will be stored in this slot.  In every other case, the
       * attribute override we supply doesn't matter.  So just go ahead and
       * program primitive ID in every case.
       */
      attr->ComponentOverrideW = true;
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideY = true;
      attr->ComponentOverrideZ = true;
      attr->ConstantSource = PRIM_ID;
      return;
   }

   /* Compute the location of the attribute relative to urb_entry_read_offset.
    * Each increment of urb_entry_read_offset represents a 256-bit value, so
    * it counts for two 128-bit VUE slots.
    */
   int source_attr = slot - 2 * urb_entry_read_offset;
   assert(source_attr >= 0 && source_attr < 32);

   /* If we are doing two-sided color, and the VUE slot following this one
    * represents a back-facing color, then we need to instruct the SF unit to
    * do back-facing swizzling.
    */
   bool swizzling = two_side_color &&
      ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
       (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1));

   /* Update max_source_attr.  If swizzling, the SF will read this slot + 1. */
   if (*max_source_attr < source_attr + swizzling)
      *max_source_attr = source_attr + swizzling;

   attr->SourceAttribute = source_attr;
   if (swizzling)
      attr->SwizzleSelect = INPUTATTR_FACING;
}
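/* Example of the swizzle path above (slot numbers hypothetical): if the VUE
 * maps VARYING_SLOT_COL0 to slot 4 and VARYING_SLOT_BFC0 to slot 5, with
 * urb_entry_read_offset == 1, then source_attr = 4 - 2 * 1 = 2.  With
 * two-sided color enabled, swizzling is set and SwizzleSelect becomes
 * INPUTATTR_FACING, so the SF reads slot 2 for front-facing triangles and
 * slot 3 for back-facing ones -- which is why max_source_attr is bumped to
 * source_attr + 1.
 */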
static void
genX(calculate_attr_overrides)(const struct brw_context *brw,
                               struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr_overrides,
                               uint32_t *point_sprite_enables,
                               uint32_t *urb_entry_read_length,
                               uint32_t *urb_entry_read_offset)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   uint32_t max_source_attr = 0;

   *point_sprite_enables = 0;

   int first_slot =
      brw_compute_first_urb_slot_required(fp->info.inputs_read,
                                          &brw->vue_map_geom_out);

   /* Each URB offset packs two varying slots */
   assert(first_slot % 2 == 0);
   *urb_entry_read_offset = first_slot / 2;

   /* From the Ivybridge PRM, Vol 2 Part 1, 3DSTATE_SBE,
    * description of dw10 Point Sprite Texture Coordinate Enable:
    *
    * "This field must be programmed to zero when non-point primitives
    * are rendered."
    *
    * The SandyBridge PRM doesn't explicitly say that point sprite enables
    * must be programmed to zero when rendering non-point primitives, but
    * the IvyBridge PRM does, and if we don't, we get garbage.
    *
    * This is not required on Haswell, as the hardware ignores this state
    * when drawing non-points -- although we do still need to be careful to
    * correctly set the attr overrides.
    *
    * _NEW_POLYGON
    * BRW_NEW_PRIMITIVE | BRW_NEW_GS_PROG_DATA | BRW_NEW_TES_PROG_DATA
    */
   bool drawing_points = brw_is_drawing_points(brw);

   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      if (input_index < 0)
         continue;

      /* _NEW_POINT */
      bool point_sprite = false;
      if (drawing_points) {
         if (point->PointSprite &&
             (attr >= VARYING_SLOT_TEX0 && attr <= VARYING_SLOT_TEX7) &&
             (point->CoordReplace & (1u << (attr - VARYING_SLOT_TEX0)))) {
            point_sprite = true;
         }

         if (attr == VARYING_SLOT_PNTC)
            point_sprite = true;

         if (point_sprite)
            *point_sprite_enables |= (1 << input_index);
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | _NEW_LIGHT | _NEW_PROGRAM */
      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attribute = { 0 };

      if (!point_sprite) {
         genX(get_attr_override)(&attribute,
                                 &brw->vue_map_geom_out,
                                 *urb_entry_read_offset, attr,
                                 _mesa_vertex_program_two_side_enabled(ctx),
                                 &max_source_attr);
      }

      /* The hardware can only do the overrides on 16 overrides at a
       * time, and the other up to 16 have to be lined up so that the
       * input index = the output index.  We'll need to do some
       * tweaking to make sure that's the case.
       */
      if (input_index < 16)
         attr_overrides[input_index] = attribute;
      else
         assert(attribute.SourceAttribute == input_index);
   }

   /* From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    * "This field should be set to the minimum length required to read the
    * maximum source attribute.  The maximum source attribute is indicated
    * by the maximum value of the enabled Attribute # Source Attribute if
    * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    * enable is not set.
    * read_length = ceiling((max_source_attr + 1) / 2)
    *
    * [errata] Corruption/Hang possible if length programmed larger than
    * recommended"
    *
    * Similar text exists for Ivy Bridge.
    */
   *urb_entry_read_length = DIV_ROUND_UP(max_source_attr + 1, 2);
}
#endif
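/* Plugging numbers into the quoted formula: with max_source_attr == 4
 * (say, five 128-bit slots 0..4 in use, or slot 3 plus a back-face
 * swizzle), the read length is DIV_ROUND_UP(4 + 1, 2) == 3, i.e. three
 * 256-bit URB reads, the minimum that still covers attribute 4.
 */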
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
typedef struct GENX(3DSTATE_WM_DEPTH_STENCIL) DEPTH_STENCIL_GENXML;
#elif GEN_GEN >= 6
typedef struct GENX(DEPTH_STENCIL_STATE) DEPTH_STENCIL_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) DEPTH_STENCIL_GENXML;
#endif

static inline void
set_depth_stencil_bits(struct brw_context *brw, DEPTH_STENCIL_GENXML *ds)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* _NEW_DEPTH */
   struct gl_depthbuffer_attrib *depth = &ctx->Depth;

   /* _NEW_STENCIL */
   struct gl_stencil_attrib *stencil = &ctx->Stencil;
   const int b = stencil->_BackFace;

   if (depth->Test && depth_irb) {
      ds->DepthTestEnable = true;
      ds->DepthBufferWriteEnable = brw_depth_writes_enabled(brw);
      ds->DepthTestFunction = intel_translate_compare_func(depth->Func);
   }

   if (brw->stencil_enabled) {
      ds->StencilTestEnable = true;
      ds->StencilWriteMask = stencil->WriteMask[0] & 0xff;
      ds->StencilTestMask = stencil->ValueMask[0] & 0xff;

      ds->StencilTestFunction =
         intel_translate_compare_func(stencil->Function[0]);
      ds->StencilFailOp =
         intel_translate_stencil_op(stencil->FailFunc[0]);
      ds->StencilPassDepthPassOp =
         intel_translate_stencil_op(stencil->ZPassFunc[0]);
      ds->StencilPassDepthFailOp =
         intel_translate_stencil_op(stencil->ZFailFunc[0]);

      ds->StencilBufferWriteEnable = brw->stencil_write_enabled;

      if (brw->stencil_two_sided) {
         ds->DoubleSidedStencilEnable = true;
         ds->BackfaceStencilWriteMask = stencil->WriteMask[b] & 0xff;
         ds->BackfaceStencilTestMask = stencil->ValueMask[b] & 0xff;

         ds->BackfaceStencilTestFunction =
            intel_translate_compare_func(stencil->Function[b]);
         ds->BackfaceStencilFailOp =
            intel_translate_stencil_op(stencil->FailFunc[b]);
         ds->BackfaceStencilPassDepthPassOp =
            intel_translate_stencil_op(stencil->ZPassFunc[b]);
         ds->BackfaceStencilPassDepthFailOp =
            intel_translate_stencil_op(stencil->ZFailFunc[b]);
      }

#if GEN_GEN <= 5 || GEN_GEN >= 9
      ds->StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      ds->BackfaceStencilReferenceValue = _mesa_get_stencil_ref(ctx, b);
#endif
   }
}

#if GEN_GEN >= 6
static void
genX(upload_depth_stencil_state)(struct brw_context *brw)
{
#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_WM_DEPTH_STENCIL), wmds) {
      set_depth_stencil_bits(brw, &wmds);
   }
#else
   uint32_t ds_offset;
   brw_state_emit(brw, GENX(DEPTH_STENCIL_STATE), 64, &ds_offset, ds) {
      set_depth_stencil_bits(brw, &ds);
   }

   /* Now upload a pointer to the indirect state */
#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
      ptr.DEPTH_STENCIL_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
   }
#endif
#endif
}

static const struct brw_tracked_state genX(depth_stencil_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_DEPTH |
              _NEW_STENCIL,
      .brw  = BRW_NEW_BLORP |
              (GEN_GEN >= 8 ? BRW_NEW_CONTEXT
                            : BRW_NEW_BATCH |
                              BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_depth_stencil_state),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN <= 5

static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(CLIP_STATE), 32, &brw->clip.state_offset, clip) {
      clip.KernelStartPointer = KSP(brw, brw->clip.prog_offset);
      clip.GRFRegisterCount =
         DIV_ROUND_UP(brw->clip.prog_data->total_grf, 16) - 1;
      clip.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      clip.SingleProgramFlow = true;
      clip.VertexURBEntryReadLength = brw->clip.prog_data->urb_read_length;
      clip.ConstantURBEntryReadLength = brw->clip.prog_data->curb_read_length;

      /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
      clip.ConstantURBEntryReadOffset = brw->curbe.clip_start * 2;
      clip.DispatchGRFStartRegisterForURBData = 1;
      clip.VertexURBEntryReadOffset = 0;

      /* BRW_NEW_URB_FENCE */
      clip.NumberofURBEntries = brw->urb.nr_clip_entries;
      clip.URBEntryAllocationSize = brw->urb.vsize - 1;

      if (brw->urb.nr_clip_entries >= 10) {
         /* Half of the URB entries go to each thread, and it has to be an
          * even number.
          */
         assert(brw->urb.nr_clip_entries % 2 == 0);

         /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
          * only 2 threads can output VUEs at a time.
          */
         clip.MaximumNumberofThreads = (GEN_GEN == 5 ? 16 : 2) - 1;
      } else {
         assert(brw->urb.nr_clip_entries >= 5);
         clip.MaximumNumberofThreads = 1 - 1;
      }

      clip.VertexPositionSpace = VPOS_NDCSPACE;
      clip.UserClipFlagsMustClipEnable = true;
      clip.GuardbandClipTestEnable = true;

      clip.ClipperViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->clip.vp_offset);

      clip.ScreenSpaceViewportXMin = -1;
      clip.ScreenSpaceViewportXMax = 1;
      clip.ScreenSpaceViewportYMin = -1;
      clip.ScreenSpaceViewportYMax = 1;

      clip.ViewportXYClipTestEnable = true;
      clip.ViewportZClipTestEnable = !(ctx->Transform.DepthClampNear &&
                                       ctx->Transform.DepthClampFar);

      /* _NEW_TRANSFORM */
      if (GEN_GEN == 5 || GEN_IS_G4X) {
         clip.UserClipDistanceClipTestEnableBitmask =
            ctx->Transform.ClipPlanesEnabled;
      } else {
         /* Up to 6 actual clip flags, plus the 7th for the negative RHW
          * workaround.
          */
         clip.UserClipDistanceClipTestEnableBitmask =
            (ctx->Transform.ClipPlanesEnabled & 0x3f) | 0x40;
      }

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      clip.ClipMode = brw->clip.prog_data->clip_mode;

#if GEN_IS_G4X
      clip.NegativeWClipTestEnable = true;
#endif
   }
}

const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM |
               _NEW_VIEWPORT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CLIP_PROG_DATA |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_PROGRAM_CACHE |
               BRW_NEW_URB_FENCE,
   },
   .emit = genX(upload_clip_state),
};

#else
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_CLIP), clip) {
      clip.StatisticsEnable = !brw->meta_in_progress;

      if (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
         clip.NonPerspectiveBarycentricEnable = true;

#if GEN_GEN >= 7
      clip.EarlyCullEnable = true;
#endif

#if GEN_GEN == 7
      clip.FrontWinding = brw->polygon_front_bit != fb->FlipY;

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            clip.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            clip.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            clip.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("Should not get here: invalid CullFlag");
         }
      } else {
         clip.CullMode = CULLMODE_NONE;
      }
#endif

#if GEN_GEN < 8
      clip.UserClipDistanceCullTestEnableBitmask =
         brw_vue_prog_data(brw->vs.base.prog_data)->cull_distance_mask;

      clip.ViewportZClipTestEnable = !(ctx->Transform.DepthClampNear &&
                                       ctx->Transform.DepthClampFar);
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex == GL_FIRST_VERTEX_CONVENTION) {
         clip.TriangleStripListProvokingVertexSelect = 0;
         clip.TriangleFanProvokingVertexSelect = 1;
         clip.LineStripListProvokingVertexSelect = 0;
      } else {
         clip.TriangleStripListProvokingVertexSelect = 2;
         clip.TriangleFanProvokingVertexSelect = 2;
         clip.LineStripListProvokingVertexSelect = 1;
      }

      /* _NEW_TRANSFORM */
      clip.UserClipDistanceClipTestEnableBitmask =
         ctx->Transform.ClipPlanesEnabled;

#if GEN_GEN >= 8
      clip.ForceUserClipDistanceClipTestEnableBitmask = true;
#endif

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      /* BRW_NEW_VIEWPORT_COUNT */
      const unsigned viewport_count = brw->clip.viewport_count;

      if (ctx->RasterDiscard) {
         clip.ClipMode = CLIPMODE_REJECT_ALL;
#if GEN_GEN == 6
         perf_debug("Rasterizer discard is currently implemented via the "
                    "clipper; having the GS not write primitives would "
                    "likely be faster.\n");
#endif
      } else {
         clip.ClipMode = CLIPMODE_NORMAL;
      }

      clip.ClipEnable = true;

      /* _NEW_POLYGON,
       * BRW_NEW_GEOMETRY_PROGRAM | BRW_NEW_TES_PROG_DATA | BRW_NEW_PRIMITIVE
       */
      if (!brw_is_drawing_points(brw) && !brw_is_drawing_lines(brw))
         clip.ViewportXYClipTestEnable = true;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;
      clip.MaximumVPIndex = viewport_count - 1;
      if (_mesa_geometric_layers(fb) == 0)
         clip.ForceZeroRTAIndexEnable = true;
   }
}

static const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_LIGHT |
              _NEW_POLYGON |
              _NEW_TRANSFORM,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_VS_PROG_DATA |
             BRW_NEW_META_IN_PROGRESS |
             BRW_NEW_PRIMITIVE |
             BRW_NEW_RASTERIZER_DISCARD |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_clip_state),
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_sf)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float point_size;

#if GEN_GEN <= 7
   /* _NEW_BUFFERS */
   bool flip_y = ctx->DrawBuffer->FlipY;
   UNUSED const bool multisampled_fbo =
      _mesa_geometric_samples(ctx->DrawBuffer) > 1;
#endif

#if GEN_GEN < 6
   const struct brw_sf_prog_data *sf_prog_data = brw->sf.prog_data;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;

   brw_state_emit(brw, GENX(SF_STATE), 64, &brw->sf.state_offset, sf) {
      sf.KernelStartPointer = KSP(brw, brw->sf.prog_offset);
      sf.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      sf.GRFRegisterCount = DIV_ROUND_UP(sf_prog_data->total_grf, 16) - 1;
      sf.DispatchGRFStartRegisterForURBData = 3;
      sf.VertexURBEntryReadOffset = BRW_SF_URB_ENTRY_READ_OFFSET;
      sf.VertexURBEntryReadLength = sf_prog_data->urb_read_length;
      sf.NumberofURBEntries = brw->urb.nr_sf_entries;
      sf.URBEntryAllocationSize = brw->urb.sfsize - 1;

      /* STATE_PREFETCH command description describes this state as being
       * something loaded through the GPE (L2 ISC), so it's INSTRUCTION
       * domain.
       */
      sf.SetupViewportStateOffset =
         ro_bo(brw->batch.state.bo, brw->sf.vp_offset);

      sf.PointRasterizationRule = RASTRULE_UPPER_RIGHT;

      /* sf.ConstantURBEntryReadLength = stage_prog_data->curb_read_length; */
      /* sf.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2; */

      /* BRW_NEW_URB_FENCE */
      sf.MaximumNumberofThreads =
         MIN2(GEN_GEN == 5 ? 48 : 24, brw->urb.nr_sf_entries) - 1;

      sf.SpritePointEnable = ctx->Point.PointSprite;

      sf.DestinationOriginHorizontalBias = 0.5;
      sf.DestinationOriginVerticalBias = 0.5;
#else
   brw_batch_emit(brw, GENX(3DSTATE_SF), sf) {
      sf.StatisticsEnable = true;
#endif
      sf.ViewportTransformEnable = true;

#if GEN_GEN == 7
      /* _NEW_BUFFERS */
      sf.DepthBufferSurfaceFormat = brw_depthbuffer_format(brw);
#endif

#if GEN_GEN <= 7
      /* _NEW_POLYGON */
      sf.FrontWinding = brw->polygon_front_bit != flip_y;
#if GEN_GEN >= 6
      sf.GlobalDepthOffsetEnableSolid = ctx->Polygon.OffsetFill;
      sf.GlobalDepthOffsetEnableWireframe = ctx->Polygon.OffsetLine;
      sf.GlobalDepthOffsetEnablePoint = ctx->Polygon.OffsetPoint;

      switch (ctx->Polygon.FrontMode) {
      case GL_FILL:
         sf.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (ctx->Polygon.BackMode) {
      case GL_FILL:
         sf.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         sf.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         sf.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      if (multisampled_fbo && ctx->Multisample.Enabled)
         sf.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;

      sf.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;
      sf.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      sf.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;
#endif

      /* _NEW_SCISSOR */
      sf.ScissorRectangleEnable = true;

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            sf.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            sf.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            sf.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         sf.CullMode = CULLMODE_NONE;
      }

#if GEN_IS_HASWELL
      sf.LineStippleEnable = ctx->Line.StippleFlag;
#endif
#endif

      /* _NEW_LINE */
#if GEN_GEN == 8
      const struct gen_device_info *devinfo = &brw->screen->devinfo;

      if (devinfo->is_cherryview)
         sf.CHVLineWidth = brw_get_line_width(brw);
      else
         sf.LineWidth = brw_get_line_width(brw);
#else
      sf.LineWidth = brw_get_line_width(brw);
#endif

      if (ctx->Line.SmoothFlag) {
         sf.LineEndCapAntialiasingRegionWidth = _10pixels;
#if GEN_GEN <= 7
         sf.AntiAliasingEnable = true;
#endif
      }

      /* _NEW_POINT - Clamp to ARB_point_parameters user limits */
      point_size = CLAMP(ctx->Point.Size, ctx->Point.MinSize, ctx->Point.MaxSize);
      /* Clamp to the hardware limits */
      sf.PointWidth = CLAMP(point_size, 0.125f, 255.875f);

      /* _NEW_PROGRAM | _NEW_POINT, BRW_NEW_VUE_MAP_GEOM_OUT */
      if (use_state_point_size(brw))
         sf.PointWidthSource = State;

#if GEN_GEN >= 8 || GEN_IS_G4X
      /* _NEW_POINT | _NEW_MULTISAMPLE */
      if ((ctx->Point.SmoothFlag || _mesa_is_multisample_enabled(ctx)) &&
          !ctx->Point.PointSprite)
         sf.SmoothPointEnable = true;
#endif

#if GEN_GEN == 10
      /* _NEW_BUFFERS
       * Smooth Point Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
       */
      const bool multisampled_fbo =
         _mesa_geometric_samples(ctx->DrawBuffer) > 1;
      if (multisampled_fbo)
         sf.SmoothPointEnable = false;
#endif

#if GEN_IS_G4X || GEN_GEN >= 5
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION) {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      } else {
         sf.TriangleFanProvokingVertexSelect = 1;
      }

#if GEN_GEN == 6
      /* BRW_NEW_FS_PROG_DATA */
      const struct brw_wm_prog_data *wm_prog_data =
         brw_wm_prog_data(brw->wm.base.prog_data);

      sf.AttributeSwizzleEnable = true;
      sf.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /*
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted, too.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) == flip_y) {
         sf.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      } else {
         sf.PointSpriteTextureCoordinateOrigin = UPPERLEFT;
      }

      /* BRW_NEW_VUE_MAP_GEOM_OUT | BRW_NEW_FRAGMENT_PROGRAM |
       * _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM | BRW_NEW_FS_PROG_DATA
       */
      uint32_t urb_entry_read_length;
      uint32_t urb_entry_read_offset;
      uint32_t point_sprite_enables;
      genX(calculate_attr_overrides)(brw, sf.Attribute, &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);
      sf.VertexURBEntryReadLength = urb_entry_read_length;
      sf.VertexURBEntryReadOffset = urb_entry_read_offset;
      sf.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sf.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
#endif
   }
}

static const struct brw_tracked_state genX(sf_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT |
               _NEW_LINE |
               _NEW_POINT |
               _NEW_PROGRAM |
               (GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0) |
               (GEN_GEN <= 7 ? _NEW_BUFFERS | _NEW_POLYGON : 0) |
               (GEN_GEN == 10 ? _NEW_BUFFERS : 0),
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               (GEN_GEN <= 5 ? BRW_NEW_BATCH |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_SF_PROG_DATA |
                               BRW_NEW_SF_VP |
                               BRW_NEW_URB_FENCE
                             : 0) |
               (GEN_GEN >= 6 ? BRW_NEW_CONTEXT : 0) |
               (GEN_GEN >= 6 && GEN_GEN <= 7 ?
                               BRW_NEW_GS_PROG_DATA |
                               BRW_NEW_PRIMITIVE |
                               BRW_NEW_TES_PROG_DATA
                             : 0) |
               (GEN_GEN == 6 ? BRW_NEW_FS_PROG_DATA |
                               BRW_NEW_FRAGMENT_PROGRAM
                             : 0),
   },
   .emit = genX(upload_sf),
};
/* ---------------------------------------------------------------------- */

static bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   unsigned i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
      uint64_t outputs_written = fp->info.outputs_written;

      /* _NEW_COLOR */
      if (rb && (outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
                 outputs_written & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
          GET_COLORMASK(ctx->Color.ColorMask, i)) {
         return true;
      }
   }

   return false;
}
1858 genX(upload_wm
)(struct brw_context
*brw
)
1860 struct gl_context
*ctx
= &brw
->ctx
;
1862 /* BRW_NEW_FS_PROG_DATA */
1863 const struct brw_wm_prog_data
*wm_prog_data
=
1864 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1866 UNUSED
bool writes_depth
=
1867 wm_prog_data
->computed_depth_mode
!= BRW_PSCDEPTH_OFF
;
1868 UNUSED
struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
1869 UNUSED
const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1872 /* We can't fold this into gen6_upload_wm_push_constants(), because
1873 * according to the SNB PRM, vol 2 part 1 section 7.2.2
1874 * (3DSTATE_CONSTANT_PS [DevSNB]):
1876 * "[DevSNB]: This packet must be followed by WM_STATE."
1878 brw_batch_emit(brw
, GENX(3DSTATE_CONSTANT_PS
), wmcp
) {
1879 if (wm_prog_data
->base
.nr_params
!= 0) {
1880 wmcp
.Buffer0Valid
= true;
1881 /* Pointer to the WM constant buffer. Covered by the set of
1882 * state flags from gen6_upload_wm_push_constants.
1884 wmcp
.ConstantBody
.PointertoConstantBuffer0
= stage_state
->push_const_offset
;
1885 wmcp
.ConstantBody
.ConstantBuffer0ReadLength
= stage_state
->push_const_size
- 1;
1891 brw_batch_emit(brw
, GENX(3DSTATE_WM
), wm
) {
1893 ctx
->NewDriverState
|= BRW_NEW_GEN4_UNIT_STATE
;
1894 brw_state_emit(brw
, GENX(WM_STATE
), 64, &stage_state
->state_offset
, wm
) {
1898 wm
._8PixelDispatchEnable
= wm_prog_data
->dispatch_8
;
1899 wm
._16PixelDispatchEnable
= wm_prog_data
->dispatch_16
;
1900 wm
._32PixelDispatchEnable
= wm_prog_data
->dispatch_32
;
1904 /* On gen4, we only have one shader kernel */
1905 if (brw_wm_state_has_ksp(wm
, 0)) {
1906 assert(brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 0) == 0);
1907 wm
.KernelStartPointer0
= KSP(brw
, stage_state
->prog_offset
);
1908 wm
.GRFRegisterCount0
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 0);
1909 wm
.DispatchGRFStartRegisterForConstantSetupData0
=
1910 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 0);
1913 /* On gen5, we have multiple shader kernels but only one GRF start
1914 * register for all kernels
1916 wm
.KernelStartPointer0
= stage_state
->prog_offset
+
1917 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 0);
1918 wm
.KernelStartPointer1
= stage_state
->prog_offset
+
1919 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 1);
1920 wm
.KernelStartPointer2
= stage_state
->prog_offset
+
1921 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 2);
1923 wm
.GRFRegisterCount0
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 0);
1924 wm
.GRFRegisterCount1
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 1);
1925 wm
.GRFRegisterCount2
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 2);
1927 wm
.DispatchGRFStartRegisterForConstantSetupData0
=
1928 wm_prog_data
->base
.dispatch_grf_start_reg
;
1930 /* Dispatch GRF Start should be the same for all shaders on gen5 */
1931 if (brw_wm_state_has_ksp(wm
, 1)) {
1932 assert(wm_prog_data
->base
.dispatch_grf_start_reg
==
1933 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 1));
1935 if (brw_wm_state_has_ksp(wm
, 2)) {
1936 assert(wm_prog_data
->base
.dispatch_grf_start_reg
==
1937 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 2));
1940 /* On gen6, we have multiple shader kernels and we no longer specify a
1941 * register count for each one.
1943 wm
.KernelStartPointer0
= stage_state
->prog_offset
+
1944 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 0);
1945 wm
.KernelStartPointer1
= stage_state
->prog_offset
+
1946 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 1);
1947 wm
.KernelStartPointer2
= stage_state
->prog_offset
+
1948 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 2);
1950 wm
.DispatchGRFStartRegisterForConstantSetupData0
=
1951 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 0);
1952 wm
.DispatchGRFStartRegisterForConstantSetupData1
=
1953 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 1);
1954 wm
.DispatchGRFStartRegisterForConstantSetupData2
=
1955 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 2);
1959 wm
.ConstantURBEntryReadLength
= wm_prog_data
->base
.curb_read_length
;
1960 /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
1961 wm
.ConstantURBEntryReadOffset
= brw
->curbe
.wm_start
* 2;
1962 wm
.SetupURBEntryReadLength
= wm_prog_data
->num_varying_inputs
* 2;
1963 wm
.SetupURBEntryReadOffset
= 0;
1964 wm
.EarlyDepthTestEnable
= true;
#if GEN_GEN >= 6
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;

      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.BarycentricInterpolationMode = wm_prog_data->barycentric_interp_modes;
#else
      if (stage_state->sampler_count)
         wm.SamplerStatePointer =
            ro_bo(brw->batch.state.bo, stage_state->sampler_offset);

      wm.LineAntialiasingRegionWidth = _05pixels;
      wm.LineEndCapAntialiasingRegionWidth = _10pixels;

      /* _NEW_POLYGON */
      if (ctx->Polygon.OffsetFill) {
         wm.GlobalDepthOffsetEnable = true;
         /* Something weird going on with legacy_global_depth_bias,
          * offset_constant, scaling and MRD.  This value passes glean
          * but gives some odd results elsewhere (eg. the
          * quad-offset-units test).
          */
         wm.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;

         /* This is the only value that passes glean:
          */
         wm.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      }

      wm.DepthCoefficientURBReadOffset = 1;
#endif

      /* BRW_NEW_STATS_WM */
      wm.StatisticsEnable = GEN_GEN >= 6 || brw->stats_wm;

#if GEN_GEN < 7
      if (wm_prog_data->base.use_alt_mode)
         wm.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      wm.SamplerCount = GEN_GEN == 5 ?
         0 : DIV_ROUND_UP(stage_state->sampler_count, 4);

      wm.BindingTableEntryCount =
         wm_prog_data->base.binding_table.size_bytes / 4;
      wm.MaximumNumberofThreads = devinfo->max_wm_threads - 1;

#if GEN_GEN == 6
      wm.DualSourceBlendEnable =
         wm_prog_data->dual_src_blend && (ctx->Color.BlendEnabled & 1) &&
         ctx->Color.Blend[0]._UsesDualSrc;
      wm.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      wm.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* From the SNB PRM, volume 2 part 1, page 281:
       * "If the PS kernel does not need the Position XY Offsets
       *  to compute a Position XY value, then this field should be
       *  programmed to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       *  to compute a Position XY value, this field should match Position
       *  ZW Interpolation Mode to ensure a consistent position.xyzw
       *  computation."
       *
       * We only require XY sample offsets.  So, this recommendation doesn't
       * look useful at the moment.  We might need this in future.
       */
      if (wm_prog_data->uses_pos_offset)
         wm.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         wm.PositionXYOffsetSelect = POSOFFSET_NONE;
#endif

      if (wm_prog_data->base.total_scratch) {
         wm.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         wm.PerThreadScratchSpace =
            ffs(stage_state->per_thread_scratch) - 11;
      }

      wm.PixelShaderComputedDepth = writes_depth;
#endif
      /* _NEW_LINE */
      wm.LineStippleEnable = ctx->Line.StippleFlag;

      /* _NEW_POLYGON */
      wm.PolygonStippleEnable = ctx->Polygon.StippleFlag;

#if GEN_GEN < 8

#if GEN_GEN >= 6
      wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
      /* _NEW_BUFFERS */
      const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

      if (multisampled_fbo) {
         /* _NEW_MULTISAMPLE */
         if (ctx->Multisample.Enabled)
            wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
         else
            wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;

         if (wm_prog_data->persample_dispatch)
            wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
         else
            wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
      } else {
         wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
         wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
      }
#endif
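
      /* Decision summary (illustrative note, not from the PRM): ON_PATTERN
       * rasterization is used only when the FBO is multisampled and GL
       * multisampling is enabled; everything else falls back to OFF_PIXEL.
       * PERSAMPLE dispatch is chosen when the shader needs per-sample
       * execution (or trivially for single-sampled FBOs), PERPIXEL otherwise.
       */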
      wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      if (wm_prog_data->uses_kill ||
          _mesa_is_alpha_test_enabled(ctx) ||
          _mesa_is_alpha_to_coverage_enabled(ctx) ||
          (GEN_GEN >= 6 && wm_prog_data->uses_omask)) {
         wm.PixelShaderKillsPixel = true;
      }

      /* _NEW_BUFFERS | _NEW_COLOR */
      if (brw_color_buffer_write_enabled(brw) || writes_depth ||
          wm.PixelShaderKillsPixel ||
          (GEN_GEN >= 6 && wm_prog_data->has_side_effects)) {
         wm.ThreadDispatchEnable = true;
      }

#if GEN_GEN >= 7
      wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif
2097 /* The "UAV access enable" bits are unnecessary on HSW because they only
2098 * seem to have an effect on the HW-assisted coherency mechanism which we
2099 * don't need, and the rasterization-related UAV_ONLY flag and the
2100 * DISPATCH_ENABLE bit can be set independently from it.
2101 * C.f. gen8_upload_ps_extra().
2103 * BRW_NEW_FRAGMENT_PROGRAM | BRW_NEW_FS_PROG_DATA | _NEW_BUFFERS |
2107 if (!(brw_color_buffer_write_enabled(brw
) || writes_depth
) &&
2108 wm_prog_data
->has_side_effects
)
2114 /* BRW_NEW_FS_PROG_DATA */
2115 if (wm_prog_data
->early_fragment_tests
)
2116 wm
.EarlyDepthStencilControl
= EDSC_PREPS
;
2117 else if (wm_prog_data
->has_side_effects
)
2118 wm
.EarlyDepthStencilControl
= EDSC_PSEXEC
;
   }

#if GEN_GEN <= 5
   if (brw->wm.offset_clamp != ctx->Polygon.OffsetClamp) {
      brw_batch_emit(brw, GENX(3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP), clamp) {
         clamp.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;
      }

      brw->wm.offset_clamp = ctx->Polygon.OffsetClamp;
   }
#endif
}
static const struct brw_tracked_state genX(wm_state) = {
   .dirty = {
      .mesa  = _NEW_LINE |
               _NEW_POLYGON |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR :
                              0) |
               (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0) |
               (GEN_GEN < 6 ? _NEW_POLYGONSTIPPLE : 0) |
               (GEN_GEN < 8 && GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0),
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA |
               (GEN_GEN < 6 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                              BRW_NEW_FRAGMENT_PROGRAM |
                              BRW_NEW_PROGRAM_CACHE |
                              BRW_NEW_SAMPLER_STATE_TABLE |
                              BRW_NEW_STATS_WM
                            : 0) |
               (GEN_GEN < 7 ? BRW_NEW_BATCH : BRW_NEW_CONTEXT),
   },
   .emit = genX(upload_wm),
};
/* ---------------------------------------------------------------------- */

/* We restrict scratch buffers to the bottom 32 bits of the address space
 * by using rw_32_bo().
 *
 * General State Base Address is a bit broken.  If the address + size as
 * seen by STATE_BASE_ADDRESS overflows 48 bits, the GPU appears to treat
 * all accesses to the buffer as being out of bounds and returns zero.
 */
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
   pkt.KernelStartPointer = KSP(brw, stage_state->prog_offset);           \
   pkt.SamplerCount       =                                               \
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
   /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests to     \
    * disable prefetching of binding tables in A0 and B0 steppings.       \
    * TODO: Revisit this WA on C0 stepping.                               \
    */                                                                    \
   pkt.BindingTableEntryCount =                                           \
      GEN_GEN == 11 ?                                                     \
      0 :                                                                 \
      stage_prog_data->binding_table.size_bytes / 4;                      \
   pkt.FloatingPointMode  = stage_prog_data->use_alt_mode;                \
                                                                          \
   if (stage_prog_data->total_scratch) {                                  \
      pkt.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0); \
      pkt.PerThreadScratchSpace =                                         \
         ffs(stage_state->per_thread_scratch) - 11;                       \
   }                                                                      \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      stage_prog_data->dispatch_grf_start_reg;                            \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;                                           \
   pkt.Enable           = true;
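
/* Encoding note (illustrative): PerThreadScratchSpace above is a log2 field.
 * per_thread_scratch is a power of two, so ffs(bytes) - 11 maps
 * 1KB -> 0, 2KB -> 1, 4KB -> 2, and so on.
 */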
static void
genX(upload_vs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(brw->vs.base.prog_data);
   const struct brw_stage_prog_data *stage_prog_data = &vue_prog_data->base;

   assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);
   assert(GEN_GEN < 11 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);
#if GEN_GEN == 6
   /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
    * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
    *
    *   [DevSNB] A pipeline flush must be programmed prior to a 3DSTATE_VS
    *   command that causes the VS Function Enable to toggle.  Pipeline
    *   flush can be executed by sending a PIPE_CONTROL command with CS
    *   stall bit set and a post sync operation.
    *
    * We've already done such a flush at the start of state upload, so we
    * don't need to do another one here.
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), cvs) {
      if (stage_state->push_const_size != 0) {
         cvs.Buffer0Valid = true;
         cvs.ConstantBody.PointertoConstantBuffer0 =
            stage_state->push_const_offset;
         cvs.ConstantBody.ConstantBuffer0ReadLength =
            stage_state->push_const_size - 1;
      }
   }
#endif

   if (GEN_GEN == 7 && devinfo->is_ivybridge)
      gen7_emit_vs_workaround_flush(brw);
#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_VS), vs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(VS_STATE), 32, &stage_state->state_offset, vs) {
#endif
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);

      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;

#if GEN_GEN < 6
      vs.GRFRegisterCount = DIV_ROUND_UP(vue_prog_data->total_grf, 16) - 1;
      vs.ConstantURBEntryReadLength = stage_prog_data->curb_read_length;
      vs.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2;

      vs.NumberofURBEntries = brw->urb.nr_vs_entries >> (GEN_GEN == 5 ? 2 : 0);
      vs.URBEntryAllocationSize = brw->urb.vsize - 1;

      vs.MaximumNumberofThreads =
         CLAMP(brw->urb.nr_vs_entries / 2, 1, devinfo->max_vs_threads) - 1;

      vs.StatisticsEnable = false;
      vs.SamplerStatePointer =
         ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#endif
#if GEN_GEN == 5
      /* Force single program flow on Ironlake.  We cannot reliably get
       * all applications working without it.  See:
       * https://bugs.freedesktop.org/show_bug.cgi?id=29172
       *
       * The most notable and reliably failing application is the Humus
       * demo.
       */
      vs.SingleProgramFlow = true;
      vs.SamplerCount = 0; /* hardware requirement */
#endif

#if GEN_GEN >= 8
      vs.SIMD8DispatchEnable =
         vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8;

      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
#endif
   }
#if GEN_GEN == 6
   /* Based on my reading of the simulator, the VS constants don't get
    * pulled into the VS FF unit until an appropriate pipeline flush
    * happens, and instead the 3DSTATE_CONSTANT_VS packet just adds
    * the references to them into a little FIFO.  The flushes are common,
    * but don't reliably happen between this and a 3DPRIMITIVE, causing
    * the primitive to use the wrong constants.  Then the FIFO
    * containing the constant setup gets added to again on the next
    * constants change, and eventually when a flush does happen the
    * unit is overwhelmed by constant changes and dies.
    *
    * To avoid this, send a PIPE_CONTROL down the line that will
    * update the unit immediately loading the constants.  The flush
    * type bits here were those set by the STATE_BASE_ADDRESS whose
    * move in a82a43e8d99e1715dd11c9c091b5ab734079b6a6 triggered the
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
#endif
}
static const struct brw_tracked_state genX(vs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? (_NEW_PROGRAM_CONSTANTS | _NEW_TRANSFORM) : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_VS_PROG_DATA |
               (GEN_GEN == 6 ? BRW_NEW_VERTEX_PROGRAM : 0) |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_SAMPLER_STATE_TABLE |
                               BRW_NEW_URB_FENCE
                             : 0),
   },
   .emit = genX(upload_vs_state),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_cc_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   struct GENX(CC_VIEWPORT) ccv;
   uint32_t cc_vp_offset;
   uint32_t *cc_map =
      brw_state_batch(brw, 4 * GENX(CC_VIEWPORT_length) * viewport_count,
                      32, &cc_vp_offset);

   for (unsigned i = 0; i < viewport_count; i++) {
      /* _NEW_VIEWPORT | _NEW_TRANSFORM */
      const struct gl_viewport_attrib *vp = &ctx->ViewportArray[i];
      if (ctx->Transform.DepthClampNear && ctx->Transform.DepthClampFar) {
         ccv.MinimumDepth = MIN2(vp->Near, vp->Far);
         ccv.MaximumDepth = MAX2(vp->Near, vp->Far);
      } else {
         ccv.MinimumDepth = 0.0;
         ccv.MaximumDepth = 1.0;
      }
      GENX(CC_VIEWPORT_pack)(NULL, cc_map, &ccv);
      cc_map += GENX(CC_VIEWPORT_length);
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
      ptr.CCViewportPointer = cc_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.CCViewportStateChange = 1;
      vp.PointertoCC_VIEWPORT = cc_vp_offset;
   }
#else
   brw->cc.vp_offset = cc_vp_offset;
   ctx->NewDriverState |= BRW_NEW_CC_VP;
#endif
}
const struct brw_tracked_state genX(cc_vp) = {
   .dirty = {
      .mesa = _NEW_TRANSFORM |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_cc_viewport)
};
/* ---------------------------------------------------------------------- */

static void
set_scissor_bits(const struct gl_context *ctx, int i,
                 bool flip_y, unsigned fb_width, unsigned fb_height,
                 struct GENX(SCISSOR_RECT) *sc)
{
   int bbox[4];

   bbox[0] = MAX2(ctx->ViewportArray[i].X, 0);
   bbox[1] = MIN2(bbox[0] + ctx->ViewportArray[i].Width, fb_width);
   bbox[2] = MAX2(ctx->ViewportArray[i].Y, 0);
   bbox[3] = MIN2(bbox[2] + ctx->ViewportArray[i].Height, fb_height);
   _mesa_intersect_scissor_bounding_box(ctx, i, bbox);

   if (bbox[0] == bbox[1] || bbox[2] == bbox[3]) {
      /* If the scissor was out of bounds and got clamped to 0 width/height
       * at the bounds, the subtraction of 1 from maximums could produce a
       * negative number and thus not clip anything.  Instead, just provide
       * a min > max scissor inside the bounds, which produces the expected
       * no rendering.
       */
      sc->ScissorRectangleXMin = 1;
      sc->ScissorRectangleXMax = 0;
      sc->ScissorRectangleYMin = 1;
      sc->ScissorRectangleYMax = 0;
   } else if (!flip_y) {
      /* texmemory: Y=0=bottom */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = bbox[2];
      sc->ScissorRectangleYMax = bbox[3] - 1;
   } else {
      /* memory: Y=0=top */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = fb_height - bbox[3];
      sc->ScissorRectangleYMax = fb_height - bbox[2] - 1;
   }
}
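
/* Worked example (illustrative): with fb_height = 100 and a GL (bottom-up)
 * bbox of [2] = 10, [3] = 30, the flipped Y=0=top rectangle becomes
 * YMin = 100 - 30 = 70 and YMax = 100 - 10 - 1 = 89: the same 20 rows,
 * now counted from the top, with the hardware's inclusive max.
 */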
static void
genX(upload_scissor_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const bool flip_y = ctx->DrawBuffer->FlipY;
   struct GENX(SCISSOR_RECT) scissor;
   uint32_t scissor_state_offset;
   const unsigned int fb_width = _mesa_geometric_width(ctx->DrawBuffer);
   const unsigned int fb_height = _mesa_geometric_height(ctx->DrawBuffer);
   uint32_t *scissor_map;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   scissor_map = brw_state_batch(
      brw, GENX(SCISSOR_RECT_length) * sizeof(uint32_t) * viewport_count,
      32, &scissor_state_offset);
   /* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */

   /* The scissor only needs to handle the intersection of drawable and
    * scissor rect.  Clipping to the boundaries of static shared buffers
    * for front/back/depth is covered by looping over cliprects in brw_draw.c.
    *
    * Note that the hardware's coordinates are inclusive, while Mesa's min is
    * inclusive but max is exclusive.
    */
   for (unsigned i = 0; i < viewport_count; i++) {
      set_scissor_bits(ctx, i, flip_y, fb_width, fb_height, &scissor);
      GENX(SCISSOR_RECT_pack)(
         NULL, scissor_map + i * GENX(SCISSOR_RECT_length), &scissor);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
      ptr.ScissorRectPointer = scissor_state_offset;
   }
}
static const struct brw_tracked_state genX(scissor_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_SCISSOR |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_scissor_state),
};

/* ---------------------------------------------------------------------- */
static void
brw_calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
                             float m00, float m11, float m30, float m31,
                             float *xmin, float *xmax,
                             float *ymin, float *ymax)
{
   /* According to the "Vertex X,Y Clamping and Quantization" section of the
    * Strips and Fans documentation:
    *
    * "The vertex X and Y screen-space coordinates are also /clamped/ to the
    *  fixed-point "guardband" range supported by the rasterization hardware"
    *
    * and
    *
    * "In almost all circumstances, if an object's vertices are actually
    *  modified by this clamping (i.e., had X or Y coordinates outside of
    *  the guardband extent) the rendered object will not match the intended
    *  result.  Therefore software should take steps to ensure that this does
    *  not happen - e.g., by clipping objects such that they do not exceed
    *  these limits after the Drawing Rectangle is applied."
    *
    * I believe the fundamental restriction is that the rasterizer (in
    * the SF/WM stages) have a limit on the number of pixels that can be
    * rasterized.  We need to ensure any coordinates beyond the rasterizer
    * limit are handled by the clipper.  So effectively that limit becomes
    * the clipper's guardband size.
    *
    * It goes on to say:
    *
    * "In addition, in order to be correctly rendered, objects must have a
    *  screenspace bounding box not exceeding 8K in the X or Y direction.
    *  This additional restriction must also be comprehended by software,
    *  i.e., enforced by use of clipping."
    *
    * This makes no sense.  Gen7+ hardware supports 16K render targets,
    * and you definitely need to be able to draw polygons that fill the
    * surface.  Our assumption is that the rasterizer was limited to 8K
    * on Sandybridge, which only supports 8K surfaces, and it was actually
    * increased to 16K on Ivybridge and later.
    *
    * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
    */
   const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;

   /* Workaround: prevent gpu hangs on SandyBridge
    * by disabling guardband clipping for odd dimensions.
    */
   if (GEN_GEN == 6 && (fb_width & 1 || fb_height & 1)) {
      *xmin = -1.0f;
      *xmax =  1.0f;
      *ymin = -1.0f;
      *ymax =  1.0f;
      return;
   }
   if (m00 != 0 && m11 != 0) {
      /* First, we compute the screen-space render area */
      const float ss_ra_xmin = MIN3(        0, m30 + m00, m30 - m00);
      const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
      const float ss_ra_ymin = MIN3(        0, m31 + m11, m31 - m11);
      const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);

      /* We want the guardband to be centered on that */
      const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
      const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
      const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
      const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;

      /* Now we need it in native device coordinates */
      const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
      const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
      const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
      const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;

      /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
       * flipped upside-down.  X should be fine though.
       */
      assert(ndc_gb_xmin <= ndc_gb_xmax);
      *xmin = ndc_gb_xmin;
      *xmax = ndc_gb_xmax;
      *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
      *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
   } else {
      /* The viewport scales to 0, so nothing will be rendered. */
      *xmin = 0.0f;
      *xmax = 0.0f;
      *ymin = 0.0f;
      *ymax = 0.0f;
   }
}
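
/* Worked example (illustrative): for a 1920x1080 viewport covering the whole
 * framebuffer on Gen7+, m00 = m30 = 960, so the screen-space guardband is
 * centered at x = 960 and spans +/- 16384 pixels; mapping back through
 * (x - m30) / m00 yields an NDC guardband of roughly [-17.07, +17.07].
 */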
static void
genX(upload_sf_clip_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float y_scale, y_bias;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   /* _NEW_BUFFERS */
   const bool flip_y = ctx->DrawBuffer->FlipY;
   const uint32_t fb_width = _mesa_geometric_width(ctx->DrawBuffer);
   const uint32_t fb_height = _mesa_geometric_height(ctx->DrawBuffer);

#if GEN_GEN >= 7
#define clv sfv
   struct GENX(SF_CLIP_VIEWPORT) sfv;
   uint32_t sf_clip_vp_offset;
   uint32_t *sf_clip_map =
      brw_state_batch(brw, GENX(SF_CLIP_VIEWPORT_length) * 4 * viewport_count,
                      64, &sf_clip_vp_offset);
#else
   struct GENX(SF_VIEWPORT) sfv;
   struct GENX(CLIP_VIEWPORT) clv;
   uint32_t sf_vp_offset, clip_vp_offset;
   uint32_t *sf_map =
      brw_state_batch(brw, GENX(SF_VIEWPORT_length) * 4 * viewport_count,
                      32, &sf_vp_offset);
   uint32_t *clip_map =
      brw_state_batch(brw, GENX(CLIP_VIEWPORT_length) * 4 * viewport_count,
                      32, &clip_vp_offset);
#endif

   if (flip_y) {
      y_scale = -1.0;
      y_bias = (float)fb_height;
   } else {
      y_scale = 1.0;
      y_bias = 0;
   }
   for (unsigned i = 0; i < brw->clip.viewport_count; i++) {
      /* _NEW_VIEWPORT: Guardband Clipping */
      float scale[3], translate[3], gb_xmin, gb_xmax, gb_ymin, gb_ymax;
      _mesa_get_viewport_xform(ctx, i, scale, translate);

      sfv.ViewportMatrixElementm00 = scale[0];
      sfv.ViewportMatrixElementm11 = scale[1] * y_scale;
      sfv.ViewportMatrixElementm22 = scale[2];
      sfv.ViewportMatrixElementm30 = translate[0];
      sfv.ViewportMatrixElementm31 = translate[1] * y_scale + y_bias;
      sfv.ViewportMatrixElementm32 = translate[2];
      brw_calculate_guardband_size(fb_width, fb_height,
                                   sfv.ViewportMatrixElementm00,
                                   sfv.ViewportMatrixElementm11,
                                   sfv.ViewportMatrixElementm30,
                                   sfv.ViewportMatrixElementm31,
                                   &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
      clv.XMinClipGuardband = gb_xmin;
      clv.XMaxClipGuardband = gb_xmax;
      clv.YMinClipGuardband = gb_ymin;
      clv.YMaxClipGuardband = gb_ymax;

#if GEN_GEN < 6
      set_scissor_bits(ctx, i, flip_y, fb_width, fb_height,
                       &sfv.ScissorRectangle);
#elif GEN_GEN >= 8
      /* _NEW_VIEWPORT | _NEW_BUFFERS: Screen Space Viewport
       * The hardware will take the intersection of the drawing rectangle,
       * scissor rectangle, and the viewport extents.  However, emitting
       * 3DSTATE_DRAWING_RECTANGLE is expensive since it requires a full
       * pipeline stall so we're better off just being a little more clever
       * with our viewport so we can emit it once at context creation time.
       */
      const float viewport_Xmin = MAX2(ctx->ViewportArray[i].X, 0);
      const float viewport_Ymin = MAX2(ctx->ViewportArray[i].Y, 0);
      const float viewport_Xmax =
         MIN2(ctx->ViewportArray[i].X + ctx->ViewportArray[i].Width, fb_width);
      const float viewport_Ymax =
         MIN2(ctx->ViewportArray[i].Y + ctx->ViewportArray[i].Height, fb_height);

      if (flip_y) {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = fb_height - viewport_Ymax;
         sfv.YMaxViewPort = fb_height - viewport_Ymin - 1;
      } else {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = viewport_Ymin;
         sfv.YMaxViewPort = viewport_Ymax - 1;
      }
#endif

#if GEN_GEN >= 7
      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_map, &sfv);
      sf_clip_map += GENX(SF_CLIP_VIEWPORT_length);
#else
      GENX(SF_VIEWPORT_pack)(NULL, sf_map, &sfv);
      GENX(CLIP_VIEWPORT_pack)(NULL, clip_map, &clv);
      sf_map += GENX(SF_VIEWPORT_length);
      clip_map += GENX(CLIP_VIEWPORT_length);
#endif
   }
#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
      ptr.SFClipViewportPointer = sf_clip_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.SFViewportStateChange = 1;
      vp.CLIPViewportStateChange = 1;
      vp.PointertoCLIP_VIEWPORT = clip_vp_offset;
      vp.PointertoSF_VIEWPORT = sf_vp_offset;
   }
#else
   brw->sf.vp_offset = sf_vp_offset;
   brw->clip.vp_offset = clip_vp_offset;
   brw->ctx.NewDriverState |= BRW_NEW_SF_VP | BRW_NEW_CLIP_VP;
#endif
}
static const struct brw_tracked_state genX(sf_clip_viewport) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_VIEWPORT |
              (GEN_GEN <= 5 ? _NEW_SCISSOR : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_sf_clip_viewport),
};

/* ---------------------------------------------------------------------- */
static void
genX(upload_gs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->gs.base;
   const struct gl_program *gs_prog = brw->programs[MESA_SHADER_GEOMETRY];
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool active = GEN_GEN >= 6 && gs_prog;

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   UNUSED const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);
#if GEN_GEN >= 7
   const struct brw_gs_prog_data *gs_prog_data =
      brw_gs_prog_data(stage_prog_data);
#endif
#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_GS), cgs) {
      if (active && stage_state->push_const_size != 0) {
         cgs.Buffer0Valid = true;
         cgs.ConstantBody.PointertoConstantBuffer0 =
            stage_state->push_const_offset;
         cgs.ConstantBody.ConstantBuffer0ReadLength =
            stage_state->push_const_size - 1;
      }
   }
#endif

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    * "Note: Because of corruption in IVB:GT2, software needs to flush the
    *  whole fixed function pipeline when the GS enable changes value in
    *  the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (devinfo->gt == 2 && brw->gs.enabled != active)
      gen7_emit_cs_stall_flush(brw);
#endif
#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_GS), gs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(GS_STATE), 32, &brw->ff_gs.state_offset, gs) {
#endif
#if GEN_GEN >= 6
      if (active) {
         INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);

#if GEN_GEN >= 7
         gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
         gs.OutputTopology = gs_prog_data->output_topology;
         gs.ControlDataHeaderSize =
            gs_prog_data->control_data_header_size_hwords;

         gs.InstanceControl = gs_prog_data->invocations - 1;
         gs.DispatchMode = vue_prog_data->dispatch_mode;

         gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;

         gs.ControlDataFormat = gs_prog_data->control_data_format;
#endif

         /* Note: the meaning of the GEN7_GS_REORDER_TRAILING bit changes between
          * Ivy Bridge and Haswell.
          *
          * On Ivy Bridge, setting this bit causes the vertices of a triangle
          * strip to be delivered to the geometry shader in an order that does
          * not strictly follow the OpenGL spec, but preserves triangle
          * orientation.  For example, if the vertices are (1, 2, 3, 4, 5), then
          * the geometry shader sees triangles:
          *
          * (1, 2, 3), (2, 4, 3), (3, 4, 5)
          *
          * (Clearing the bit is even worse, because it fails to preserve
          * orientation).
          *
          * Triangle strips with adjacency always ordered in a way that preserves
          * triangle orientation but does not strictly follow the OpenGL spec,
          * regardless of the setting of this bit.
          *
          * On Haswell, both triangle strips and triangle strips with adjacency
          * are always ordered in a way that preserves triangle orientation.
          * Setting this bit causes the ordering to strictly follow the OpenGL
          * spec.
          *
          * So in either case we want to set the bit.  Unfortunately on Ivy
          * Bridge this will get the order close to correct but not perfect.
          */
         gs.ReorderMode = TRAILING;
         gs.MaximumNumberofThreads =
            GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                         : (devinfo->max_gs_threads - 1);
#if GEN_GEN < 7
         gs.SOStatisticsEnable = true;
         if (gs_prog->info.has_transform_feedback_varyings)
            gs.SVBIPayloadEnable = _mesa_is_xfb_active_and_unpaused(ctx);

         /* GEN6_GS_SPF_MODE and GEN6_GS_VECTOR_MASK_ENABLE are enabled as it
          * was previously done for gen6.
          *
          * TODO: test with both disabled to see if the HW is behaving
          *       as expected, like in gen7.
          */
         gs.SingleProgramFlow = true;
         gs.VectorMaskEnable = true;
#endif

#if GEN_GEN >= 8
         gs.ExpectedVertexCount = gs_prog_data->vertices_in;
         if (gs_prog_data->static_vertex_count != -1) {
            gs.StaticOutput = true;
            gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
         }
         gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

         gs.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;
#endif
#if GEN_GEN >= 7
         const int urb_entry_write_offset = 1;
         const uint32_t urb_entry_output_length =
            DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
            urb_entry_write_offset;

         gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
         gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
#endif
      }
#endif
#if GEN_GEN <= 6
      if (!active && brw->ff_gs.prog_active) {
         /* In gen6, transform feedback for the VS stage is done with an
          * ad-hoc GS program.  This function provides the needed 3DSTATE_GS
          * for this.
          */
         gs.KernelStartPointer = KSP(brw, brw->ff_gs.prog_offset);
         gs.SingleProgramFlow = true;
         gs.DispatchGRFStartRegisterForURBData = GEN_GEN == 6 ? 2 : 1;
         gs.VertexURBEntryReadLength = brw->ff_gs.prog_data->urb_read_length;

#if GEN_GEN <= 5
         gs.GRFRegisterCount =
            DIV_ROUND_UP(brw->ff_gs.prog_data->total_grf, 16) - 1;
         /* BRW_NEW_URB_FENCE */
         gs.NumberofURBEntries = brw->urb.nr_gs_entries;
         gs.URBEntryAllocationSize = brw->urb.vsize - 1;
         gs.MaximumNumberofThreads = brw->urb.nr_gs_entries >= 8 ? 1 : 0;
         gs.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
#else
         gs.Enable = true;
         gs.VectorMaskEnable = true;
         gs.SVBIPayloadEnable = true;
         gs.SVBIPostIncrementEnable = true;
         gs.SVBIPostIncrementValue =
            brw->ff_gs.prog_data->svbi_postincrement_value;
         gs.SOStatisticsEnable = true;
         gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
#endif
      }
#endif
      if (!active && !brw->ff_gs.prog_active) {
#if GEN_GEN < 8
         gs.DispatchGRFStartRegisterForURBData = 1;
#if GEN_GEN >= 7
         gs.IncludeVertexHandles = true;
#endif
#endif
      }

#if GEN_GEN >= 6
      gs.StatisticsEnable = true;
#endif
#if GEN_GEN == 5 || GEN_GEN == 6
      gs.RenderingEnabled = true;
#endif
#if GEN_GEN < 8
      gs.MaximumVPIndex = brw->clip.viewport_count - 1;
#endif
   }

#if GEN_GEN == 6
   brw->gs.enabled = active;
#endif
}
static const struct brw_tracked_state genX(gs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_URB_FENCE |
                               BRW_NEW_VIEWPORT_COUNT
                             : 0) |
               (GEN_GEN >= 6 ? BRW_NEW_CONTEXT |
                               BRW_NEW_GEOMETRY_PROGRAM |
                               BRW_NEW_GS_PROG_DATA
                             : 0) |
               (GEN_GEN < 7 ? BRW_NEW_FF_GS_PROG_DATA : 0),
   },
   .emit = genX(upload_gs_state),
};

/* ---------------------------------------------------------------------- */
UNUSED static GLenum
fix_dual_blend_alpha_to_one(GLenum function)
{
   switch (function) {
   case GL_SRC1_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_SRC1_ALPHA:
      return GL_ZERO;
   }

   return function;
}
#define blend_factor(x) brw_translate_blend_factor(x)
#define blend_eqn(x) brw_translate_blend_equation(x)

/**
 * Modify blend function to force destination alpha to 1.0
 *
 * If \c function specifies a blend function that uses destination alpha,
 * replace it with a function that hard-wires destination alpha to 1.0.  This
 * is used when rendering to xRGB targets.
 */
static GLenum
brw_fix_xRGB_alpha(GLenum function)
{
   switch (function) {
   case GL_DST_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_DST_ALPHA:
   case GL_SRC_ALPHA_SATURATE:
      return GL_ZERO;
   }

   return function;
}
#if GEN_GEN >= 6
typedef struct GENX(BLEND_STATE_ENTRY) BLEND_ENTRY_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) BLEND_ENTRY_GENXML;
#endif
static bool
set_blend_entry_bits(struct brw_context *brw, BLEND_ENTRY_GENXML *entry, int i,
                     bool alpha_to_one)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

   bool independent_alpha_blend = false;

   /* Used for implementing the following bit of GL_EXT_texture_integer:
    * "Per-fragment operations that require floating-point color
    *  components, including multisample alpha operations, alpha test,
    *  blending, and dithering, have no effect when the corresponding
    *  colors are written to an integer color buffer."
    */
   const bool integer = ctx->DrawBuffer->_IntegerBuffers & (0x1 << i);

   const unsigned blend_enabled = GEN_GEN >= 6 ?
      ctx->Color.BlendEnabled & (1 << i) : ctx->Color.BlendEnabled;
   /* _NEW_COLOR */
   if (ctx->Color.ColorLogicOpEnabled) {
      GLenum rb_type = rb ? _mesa_get_format_datatype(rb->Format)
                          : GL_UNSIGNED_NORMALIZED;
      WARN_ONCE(ctx->Color.LogicOp != GL_COPY &&
                rb_type != GL_UNSIGNED_NORMALIZED &&
                rb_type != GL_FLOAT, "Ignoring %s logic op on %s "
                "renderbuffer",
                _mesa_enum_to_string(ctx->Color.LogicOp),
                _mesa_enum_to_string(rb_type));
      if (GEN_GEN >= 8 || rb_type == GL_UNSIGNED_NORMALIZED) {
         entry->LogicOpEnable = true;
         entry->LogicOpFunction = ctx->Color._LogicOp;
      }
   } else if (blend_enabled && !ctx->Color._AdvancedBlendMode
              && (GEN_GEN <= 5 || !integer)) {
      GLenum eqRGB = ctx->Color.Blend[i].EquationRGB;
      GLenum eqA = ctx->Color.Blend[i].EquationA;
      GLenum srcRGB = ctx->Color.Blend[i].SrcRGB;
      GLenum dstRGB = ctx->Color.Blend[i].DstRGB;
      GLenum srcA = ctx->Color.Blend[i].SrcA;
      GLenum dstA = ctx->Color.Blend[i].DstA;

      if (eqRGB == GL_MIN || eqRGB == GL_MAX)
         srcRGB = dstRGB = GL_ONE;

      if (eqA == GL_MIN || eqA == GL_MAX)
         srcA = dstA = GL_ONE;
      /* Due to hardware limitations, the destination may have information
       * in an alpha channel even when the format specifies no alpha
       * channel.  In order to avoid getting any incorrect blending due to
       * that alpha channel, coerce the blend factors to values that will
       * not read the alpha channel, but will instead use the correct
       * implicit value for alpha.
       */
      if (rb && !_mesa_base_format_has_channel(rb->_BaseFormat,
                                               GL_TEXTURE_ALPHA_TYPE)) {
         srcRGB = brw_fix_xRGB_alpha(srcRGB);
         srcA = brw_fix_xRGB_alpha(srcA);
         dstRGB = brw_fix_xRGB_alpha(dstRGB);
         dstA = brw_fix_xRGB_alpha(dstA);
      }
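
      /* Illustrative example: blending with (GL_DST_ALPHA,
       * GL_ONE_MINUS_DST_ALPHA) against an alpha-less xRGB target is
       * rewritten above to (GL_ONE, GL_ZERO), which is exactly what those
       * factors evaluate to when destination alpha is implicitly 1.0.
       */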
      /* From the BLEND_STATE docs, DWord 0, Bit 29 (AlphaToOne Enable):
       * "If Dual Source Blending is enabled, this bit must be disabled."
       *
       * We override SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO,
       * and leave it enabled anyway.
       */
      if (GEN_GEN >= 6 && ctx->Color.Blend[i]._UsesDualSrc && alpha_to_one) {
         srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
         srcA = fix_dual_blend_alpha_to_one(srcA);
         dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
         dstA = fix_dual_blend_alpha_to_one(dstA);
      }
      entry->ColorBufferBlendEnable = true;
      entry->DestinationBlendFactor = blend_factor(dstRGB);
      entry->SourceBlendFactor = blend_factor(srcRGB);
      entry->DestinationAlphaBlendFactor = blend_factor(dstA);
      entry->SourceAlphaBlendFactor = blend_factor(srcA);
      entry->ColorBlendFunction = blend_eqn(eqRGB);
      entry->AlphaBlendFunction = blend_eqn(eqA);

      if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB)
         independent_alpha_blend = true;
   }

   return independent_alpha_blend;
}
static void
genX(upload_blend_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   int size;

   /* We need at least one BLEND_STATE written, because we might do
    * thread dispatch even if _NumColorDrawBuffers is 0 (for example
    * for computed depth or alpha test), which will do an FB write
    * with render target 0, which will reference BLEND_STATE[0] for
    * alpha test enable.
    */
   int nr_draw_buffers = ctx->DrawBuffer->_NumColorDrawBuffers;
   if (nr_draw_buffers == 0 && ctx->Color.AlphaEnabled)
      nr_draw_buffers = 1;

   size = GENX(BLEND_STATE_ENTRY_length) * 4 * nr_draw_buffers;
#if GEN_GEN >= 8
   size += GENX(BLEND_STATE_length) * 4;
#endif

   uint32_t *blend_map;
   blend_map = brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);
#if GEN_GEN >= 8
   struct GENX(BLEND_STATE) blend = { 0 };
   {
#else
   for (int i = 0; i < nr_draw_buffers; i++) {
      struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#define blend entry
#endif
      /* OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       *  integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       *  operations are skipped."
       */
      if (!(ctx->DrawBuffer->_IntegerBuffers & 0x1)) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            if (ctx->Multisample.SampleAlphaToCoverage) {
               blend.AlphaToCoverageEnable = true;
               blend.AlphaToCoverageDitherEnable = GEN_GEN >= 7;
            }
            if (ctx->Multisample.SampleAlphaToOne)
               blend.AlphaToOneEnable = true;
         }

         /* _NEW_COLOR */
         if (ctx->Color.AlphaEnabled) {
            blend.AlphaTestEnable = true;
            blend.AlphaTestFunction =
               intel_translate_compare_func(ctx->Color.AlphaFunc);
         }

         if (ctx->Color.DitherFlag) {
            blend.ColorDitherEnable = true;
         }
      }
#if GEN_GEN >= 8
      for (int i = 0; i < nr_draw_buffers; i++) {
         struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#else
      {
#endif
         blend.IndependentAlphaBlendEnable =
            set_blend_entry_bits(brw, &entry, i, blend.AlphaToOneEnable) ||
            blend.IndependentAlphaBlendEnable;
         /* See section 8.1.6 "Pre-Blend Color Clamping" of the
          * SandyBridge PRM Volume 2 Part 1 for HW requirements.
          *
          * We do our ARB_color_buffer_float CLAMP_FRAGMENT_COLOR
          * clamping in the fragment shader.  For its clamping of
          * blending, the spec says:
          *
          *     "RESOLVED: For fixed-point color buffers, the inputs and
          *      the result of the blending equation are clamped.  For
          *      floating-point color buffers, no clamping occurs."
          *
          * So, generally, we want clamping to the render target's range.
          * And, good news, the hardware tables for both pre- and
          * post-blend color clamping are either ignored, or any are
          * allowed, or clamping is required but RT range clamping is a
          * valid option.
          */
         entry.PreBlendColorClampEnable = true;
         entry.PostBlendColorClampEnable = true;
         entry.ColorClampRange = COLORCLAMP_RTFORMAT;
         entry.WriteDisableRed   = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 0);
         entry.WriteDisableGreen = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 1);
         entry.WriteDisableBlue  = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 2);
         entry.WriteDisableAlpha = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 3);

#if GEN_GEN >= 8
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[1 + i * 2], &entry);
#else
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[i * 2], &entry);
#endif
      }
   }
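
   /* Layout note (illustrative): on Gen8+ blend_map[0] holds the packed
    * BLEND_STATE header dword, so entry i lands at dword 1 + i * 2; before
    * Gen8 there is no header and entry i starts at dword i * 2.  Either way
    * each BLEND_STATE_ENTRY occupies two dwords.
    */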
#if GEN_GEN >= 8
   GENX(BLEND_STATE_pack)(NULL, blend_map, &blend);
#endif

#if GEN_GEN < 7
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoBLEND_STATE = brw->cc.blend_state_offset;
      ptr.BLEND_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
      ptr.BlendStatePointer = brw->cc.blend_state_offset;
#if GEN_GEN >= 8
      ptr.BlendStatePointerValid = true;
#endif
   }
#endif
}
static const struct brw_tracked_state genX(blend_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_STATE_BASE_ADDRESS,
   },
   .emit = genX(upload_blend_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};
static void
genX(upload_push_constant_packets)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   UNUSED uint32_t mocs = GEN_GEN < 8 ? GEN7_MOCS_L3 : 0;

   struct brw_stage_state *stage_states[] = {
      &brw->vs.base,
      &brw->tcs.base,
      &brw->tes.base,
      &brw->gs.base,
      &brw->wm.base,
   };

   if (GEN_GEN == 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail &&
       stage_states[MESA_SHADER_VERTEX]->push_constants_dirty)
      gen7_emit_vs_workaround_flush(brw);
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct brw_stage_state *stage_state = stage_states[stage];
      UNUSED struct gl_program *prog = ctx->_Shader->CurrentProgram[stage];

      if (!stage_state->push_constants_dirty)
         continue;

      brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), pkt) {
         pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
         if (stage_state->prog_data) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
            /* The Skylake PRM contains the following restriction:
             *
             *    "The driver must ensure The following case does not occur
             *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
             *     buffer 3 read length equal to zero committed followed by a
             *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
             *     zero committed."
             *
             * To avoid this, we program the buffers in the highest slots.
             * This way, slot 0 is only used if slot 3 is also used.
             */
            int n = 3;

            for (int i = 3; i >= 0; i--) {
               const struct brw_ubo_range *range =
                  &stage_state->prog_data->ubo_ranges[i];

               if (range->length == 0)
                  continue;

               const struct gl_uniform_block *block =
                  prog->sh.UniformBlocks[range->block];
               const struct gl_buffer_binding *binding =
                  &ctx->UniformBufferBindings[block->Binding];

               if (binding->BufferObject == ctx->Shared->NullBufferObj) {
                  static unsigned msg_id = 0;
                  _mesa_gl_debug(ctx, &msg_id, MESA_DEBUG_SOURCE_API,
                                 MESA_DEBUG_TYPE_UNDEFINED,
                                 MESA_DEBUG_SEVERITY_HIGH,
                                 "UBO %d unbound, %s shader uniform data "
                                 "will be undefined.",
                                 range->block,
                                 _mesa_shader_stage_to_string(stage));
                  continue;
               }

               assert(binding->Offset % 32 == 0);

               struct brw_bo *bo = intel_bufferobj_buffer(brw,
                  intel_buffer_object(binding->BufferObject),
                  binding->Offset, range->length * 32, false);

               pkt.ConstantBody.ReadLength[n] = range->length;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(bo, range->start * 32 + binding->Offset);
               n--;
            }

            if (stage_state->push_const_size > 0) {
               pkt.ConstantBody.ReadLength[n] = stage_state->push_const_size;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(stage_state->push_const_bo,
                        stage_state->push_const_offset);
            }
#else
            pkt.ConstantBody.ReadLength[0] = stage_state->push_const_size;
            pkt.ConstantBody.Buffer[0].offset =
               stage_state->push_const_offset | mocs;
#endif
         }
      }

      stage_state->push_constants_dirty = false;
      brw->ctx.NewDriverState |= GEN_GEN >= 9 ? BRW_NEW_SURFACES : 0;
   }
}
const struct brw_tracked_state genX(push_constant_packets) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_DRAW_CALL,
   },
   .emit = genX(upload_push_constant_packets),
};
#if GEN_GEN >= 6
static void
genX(upload_vs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_program *vp = brw->programs[MESA_SHADER_VERTEX];
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;

   gen6_upload_push_constants(brw, vp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(vs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VERTEX_PROGRAM |
               BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(upload_vs_push_constants),
};
static void
genX(upload_gs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->gs.base;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   const struct gl_program *gp = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->gs.base.prog_data;

   gen6_upload_push_constants(brw, gp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(gs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_GS_PROG_DATA,
   },
   .emit = genX(upload_gs_push_constants),
};
static void
genX(upload_wm_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   gen6_upload_push_constants(brw, fp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(wm_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_wm_push_constants),
};
#endif

/* ---------------------------------------------------------------------- */
#if GEN_GEN >= 6
static unsigned
genX(determine_sample_mask)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float coverage = 1.0f;
   bool coverage_invert = false;
   unsigned sample_mask = ~0u;

   /* BRW_NEW_NUM_SAMPLES */
   unsigned num_samples = brw->num_samples;

   if (_mesa_is_multisample_enabled(ctx)) {
      if (ctx->Multisample.SampleCoverage) {
         coverage = ctx->Multisample.SampleCoverageValue;
         coverage_invert = ctx->Multisample.SampleCoverageInvert;
      }
      if (ctx->Multisample.SampleMask) {
         sample_mask = ctx->Multisample.SampleMaskValue;
      }
   }

   if (num_samples > 1) {
      int coverage_int = (int) (num_samples * coverage + 0.5f);
      uint32_t coverage_bits = (1 << coverage_int) - 1;
      if (coverage_invert)
         coverage_bits ^= (1 << num_samples) - 1;
      return coverage_bits & sample_mask;
   } else {
      return 1;
   }
}
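
/* Worked example (illustrative): with 4x MSAA, SampleCoverageValue = 0.5 and
 * no inversion, coverage_int = (int)(4 * 0.5 + 0.5) = 2, so coverage_bits =
 * 0b0011 and half the samples stay enabled.  With SampleCoverageInvert set,
 * the mask flips to 0b1100 within the 4-sample field.
 */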
static void
genX(emit_3dstate_multisample2)(struct brw_context *brw,
                                unsigned num_samples)
{
   unsigned log2_samples = ffs(num_samples) - 1;

   brw_batch_emit(brw, GENX(3DSTATE_MULTISAMPLE), multi) {
      multi.PixelLocation = CENTER;
      multi.NumberofMultisamples = log2_samples;
#if GEN_GEN == 6
      GEN_SAMPLE_POS_4X(multi.Sample);
#elif GEN_GEN == 7
      switch (num_samples) {
      case 1:
         GEN_SAMPLE_POS_1X(multi.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(multi.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(multi.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(multi.Sample);
         break;
      default:
         break;
      }
#endif
   }
}
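
/* Encoding note (illustrative): NumberofMultisamples is a log2 field;
 * ffs(num_samples) - 1 maps the power-of-two counts 1, 2, 4, 8, 16 to
 * 0, 1, 2, 3, 4.
 */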
static void
genX(upload_multisample_state)(struct brw_context *brw)
{
   assert(brw->num_samples > 0 && brw->num_samples <= 16);

   genX(emit_3dstate_multisample2)(brw, brw->num_samples);

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = genX(determine_sample_mask)(brw);
   }
}
static const struct brw_tracked_state genX(multisample_state) = {
   .dirty = {
      .mesa = _NEW_MULTISAMPLE |
              (GEN_GEN == 10 ? _NEW_BUFFERS : 0),
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_NUM_SAMPLES,
   },
   .emit = genX(upload_multisample_state)
};
#endif

/* ---------------------------------------------------------------------- */
static void
genX(upload_color_calc_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_state_emit(brw, GENX(COLOR_CALC_STATE), 64, &brw->cc.state_offset, cc) {
#if GEN_GEN <= 5
      cc.IndependentAlphaBlendEnable =
         set_blend_entry_bits(brw, &cc, 0, false);
      set_depth_stencil_bits(brw, &cc);

      if (ctx->Color.AlphaEnabled &&
          ctx->DrawBuffer->_NumColorDrawBuffers <= 1) {
         cc.AlphaTestEnable = true;
         cc.AlphaTestFunction =
            intel_translate_compare_func(ctx->Color.AlphaFunc);
      }

      cc.ColorDitherEnable = ctx->Color.DitherFlag;

      /* BRW_NEW_STATS_WM */
      cc.StatisticsEnable = brw->stats_wm;

      cc.CCViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->cc.vp_offset);
#else
      /* _NEW_COLOR */
      cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];

      /* _NEW_STENCIL */
      cc.StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      cc.BackfaceStencilReferenceValue =
         _mesa_get_stencil_ref(ctx, ctx->Stencil._BackFace);
#endif

      /* _NEW_COLOR */
      UNCLAMPED_FLOAT_TO_UBYTE(cc.AlphaReferenceValueAsUNORM8,
                               ctx->Color.AlphaRef);
   }

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.ColorCalcStatePointer = brw->cc.state_offset;
      ptr.ColorCalcStatePointerValid = true;
   }
#else
   brw->ctx.NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
#endif
}
static const struct brw_tracked_state genX(color_calc_state) = {
   .dirty = {
      .mesa = _NEW_COLOR |
              _NEW_STENCIL |
              (GEN_GEN <= 5 ? _NEW_BUFFERS |
                              _NEW_MULTISAMPLE
                            : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             (GEN_GEN <= 5 ? BRW_NEW_CC_VP |
                             BRW_NEW_STATS_WM
                           : BRW_NEW_CC_STATE |
                             BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_color_calc_state),
};

/* ---------------------------------------------------------------------- */
#if GEN_GEN >= 7
static void
genX(upload_sbe)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   UNUSED const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
#if GEN_GEN >= 8
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = { { 0 } };
#else
#define attr_overrides sbe.Attribute
#endif
   uint32_t urb_entry_read_length;
   uint32_t urb_entry_read_offset;
   uint32_t point_sprite_enables;

   brw_batch_emit(brw, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* _NEW_BUFFERS */
      bool flip_y = ctx->DrawBuffer->FlipY;

      /* _NEW_POINT
       *
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) == flip_y)
         sbe.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      else
         sbe.PointSpriteTextureCoordinateOrigin = UPPERLEFT;

      /* _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM,
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM |
       * BRW_NEW_GS_PROG_DATA | BRW_NEW_PRIMITIVE | BRW_NEW_TES_PROG_DATA |
       * BRW_NEW_VUE_MAP_GEOM_OUT
       */
      genX(calculate_attr_overrides)(brw,
                                     attr_overrides,
                                     &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);

      /* Typically, the URB entry read length and offset should be programmed
       * in 3DSTATE_VS and 3DSTATE_GS; SBE inherits it from the last active
       * stage which produces geometry.  However, we don't know the proper
       * value until we call calculate_attr_overrides().
       *
       * To fit with our existing code, we override the inherited values and
       * specify it here directly, as we did on previous generations.
       */
      sbe.VertexURBEntryReadLength = urb_entry_read_length;
      sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
      sbe.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;

#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      /* prepare the active component dwords */
      for (int i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
#endif
   }

#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
#endif

#undef attr_overrides
}
static const struct brw_tracked_state genX(sbe_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_POINT |
               _NEW_PROGRAM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_GS_PROG_DATA |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               (GEN_GEN == 7 ? BRW_NEW_PRIMITIVE
                             : 0),
   },
   .emit = genX(upload_sbe),
};
#endif

/* ---------------------------------------------------------------------- */
#if GEN_GEN >= 7
/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries containing a SO_DECL per
 * stream.  We only have one stream of rendering coming out of the GS unit, so
 * we only emit stream 0 (low 16 bits) SO_DECLs.
 */
static void
genX(upload_3dstate_so_decl_list)(struct brw_context *brw,
                                  const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));
   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < linked_xfb_info->NumOutputs; i++) {
      const struct gl_transform_feedback_output *output =
         &linked_xfb_info->Outputs[i];
      const int buffer = output->OutputBuffer;
      const int varying = output->OutputRegister;
      const unsigned stream_id = output->StreamId;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->DstOffset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->OutputBuffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }
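
      /* Illustrative example: a 6-component gap becomes one hole with
       * ComponentMask = 0xf (four components) followed by one with
       * ComponentMask = 0x3 (the remaining two), since MIN2(skip, 4)
       * clamps each hole to at most four components.
       */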
      next_offset[buffer] = output->DstOffset + output->NumComponents;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->OutputBuffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->NumComponents) - 1) << output->ComponentOffset,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }
   uint32_t *dw;
   dw = brw_batch_emitn(brw, GENX(3DSTATE_SO_DECL_LIST), 3 + 2 * max_decls,
                        .StreamtoBufferSelects0 = buffer_mask[0],
                        .StreamtoBufferSelects1 = buffer_mask[1],
                        .StreamtoBufferSelects2 = buffer_mask[2],
                        .StreamtoBufferSelects3 = buffer_mask[3],
                        .NumEntries0 = decls[0],
                        .NumEntries1 = decls[1],
                        .NumEntries2 = decls[2],
                        .NumEntries3 = decls[3]);

   for (int i = 0; i < max_decls; i++) {
      GENX(SO_DECL_ENTRY_pack)(
         brw, dw + 2 + i * 2,
         &(struct GENX(SO_DECL_ENTRY)) {
            .Stream0Decl = so_decl[0][i],
            .Stream1Decl = so_decl[1][i],
            .Stream2Decl = so_decl[2][i],
            .Stream3Decl = so_decl[3][i],
         });
   }
}
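
/* Sizing note (illustrative): 3DSTATE_SO_DECL_LIST is 3 header dwords plus
 * one 64-bit SO_DECL_ENTRY (two dwords) per decl, hence the 3 + 2 * max_decls
 * packet length and the dw + 2 + i * 2 destination above.
 */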
static void
genX(upload_3dstate_so_buffers)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
#if GEN_GEN < 8
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
#else
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) xfb_obj;
   uint32_t mocs_wb = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
#endif
   /* Set up the up to 4 output buffers.  These are the ranges defined in the
    * gl_transform_feedback_object.
    */
   for (int i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);
      uint32_t start = xfb_obj->Offset[i];
      uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
      const uint32_t size = end - start;

      if (!bufferobj || !size) {
         brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
            sob.SOBufferIndex = i;
         }
         continue;
      }

      assert(start % 4 == 0);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, bufferobj, start, size, true);
      assert(end <= bo->size);
3808 brw_batch_emit(brw
, GENX(3DSTATE_SO_BUFFER
), sob
) {
3809 sob
.SOBufferIndex
= i
;
3811 sob
.SurfaceBaseAddress
= rw_bo(bo
, start
);
3813 sob
.SurfacePitch
= linked_xfb_info
->Buffers
[i
].Stride
* 4;
3814 sob
.SurfaceEndAddress
= rw_bo(bo
, end
);
3816 sob
.SOBufferEnable
= true;
3817 sob
.StreamOffsetWriteEnable
= true;
3818 sob
.StreamOutputBufferOffsetAddressEnable
= true;
3819 sob
.SOBufferMOCS
= mocs_wb
;
3821 sob
.SurfaceSize
= MAX2(xfb_obj
->Size
[i
] / 4, 1) - 1;
3822 sob
.StreamOutputBufferOffsetAddress
=
3823 rw_bo(brw_obj
->offset_bo
, i
* sizeof(uint32_t));
3825 if (brw_obj
->zero_offsets
) {
3826 /* Zero out the offset and write that to offset_bo */
3827 sob
.StreamOffset
= 0;
3829 /* Use offset_bo as the "Stream Offset." */
3830 sob
.StreamOffset
= 0xFFFFFFFF;
3837 brw_obj
->zero_offsets
= false;
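
/* Worked example for the SO_BUFFER programming above (values assumed for
 * illustration): binding a 64-byte range at offset 16 gives start = 16 and
 * end = ALIGN(16 + 64, 4) = 80, and on Gen8+ SurfaceSize =
 * MAX2(64 / 4, 1) - 1 = 15, i.e. the buffer length in DWords minus one.
 * The 0xFFFFFFFF StreamOffset sentinel tells the hardware to load the
 * running offset from offset_bo instead of resetting it.
 */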
static bool
query_active(struct gl_query_object *q)
{
   return q && q->Active;
}
static void
genX(upload_3dstate_streamout)(struct brw_context *brw, bool active,
                               const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   brw_batch_emit(brw, GENX(3DSTATE_STREAMOUT), sos) {
      if (active) {
         int urb_entry_read_offset = 0;
         int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
            urb_entry_read_offset;

         sos.SOFunctionEnable = true;
         sos.SOStatisticsEnable = true;

         /* BRW_NEW_RASTERIZER_DISCARD */
         if (ctx->RasterDiscard) {
            if (!query_active(ctx->Query.PrimitivesGenerated[0])) {
               sos.RenderingDisable = true;
            } else {
               perf_debug("Rasterizer discard with a GL_PRIMITIVES_GENERATED "
                          "query active relies on the clipper.\n");
            }
         }

         /* _NEW_LIGHT */
         if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
            sos.ReorderMode = TRAILING;

#if GEN_GEN < 8
         sos.SOBufferEnable0 = xfb_obj->Buffers[0] != NULL;
         sos.SOBufferEnable1 = xfb_obj->Buffers[1] != NULL;
         sos.SOBufferEnable2 = xfb_obj->Buffers[2] != NULL;
         sos.SOBufferEnable3 = xfb_obj->Buffers[3] != NULL;
#else
         const struct gl_transform_feedback_info *linked_xfb_info =
            xfb_obj->program->sh.LinkedTransformFeedback;
         /* Set buffer pitches; 0 means unbound. */
         if (xfb_obj->Buffers[0])
            sos.Buffer0SurfacePitch = linked_xfb_info->Buffers[0].Stride * 4;
         if (xfb_obj->Buffers[1])
            sos.Buffer1SurfacePitch = linked_xfb_info->Buffers[1].Stride * 4;
         if (xfb_obj->Buffers[2])
            sos.Buffer2SurfacePitch = linked_xfb_info->Buffers[2].Stride * 4;
         if (xfb_obj->Buffers[3])
            sos.Buffer3SurfacePitch = linked_xfb_info->Buffers[3].Stride * 4;
#endif

         /* We always read the whole vertex.  This could be reduced at some
          * point by reading less and offsetting the register index in the
          * SO_DECLs.
          */
         sos.Stream0VertexReadOffset = urb_entry_read_offset;
         sos.Stream0VertexReadLength = urb_entry_read_length - 1;
         sos.Stream1VertexReadOffset = urb_entry_read_offset;
         sos.Stream1VertexReadLength = urb_entry_read_length - 1;
         sos.Stream2VertexReadOffset = urb_entry_read_offset;
         sos.Stream2VertexReadLength = urb_entry_read_length - 1;
         sos.Stream3VertexReadOffset = urb_entry_read_offset;
         sos.Stream3VertexReadLength = urb_entry_read_length - 1;
      }
   }
}
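
/* Worked example for the vertex read length above (illustrative): a VUE
 * map with 9 slots gives urb_entry_read_length = (9 + 1) / 2 = 5, since
 * each URB read fetches a pair of 128-bit slots; the packet fields are
 * programmed as length - 1 = 4.
 */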
static void
genX(upload_sol)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   bool active = _mesa_is_xfb_active_and_unpaused(ctx);

   if (active) {
      genX(upload_3dstate_so_buffers)(brw);

      /* BRW_NEW_VUE_MAP_GEOM_OUT */
      genX(upload_3dstate_so_decl_list)(brw, &brw->vue_map_geom_out);
   }

   /* Finally, set up the SOL stage.  This command must always follow
    * updates to the nonpipelined SOL state (3DSTATE_SO_BUFFER,
    * 3DSTATE_SO_DECL_LIST) or MMIO register updates (currently performed
    * by the kernel at each batch emit).
    */
   genX(upload_3dstate_streamout)(brw, active, &brw->vue_map_geom_out);
}

static const struct brw_tracked_state genX(sol_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_RASTERIZER_DISCARD |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               BRW_NEW_TRANSFORM_FEEDBACK,
   },
   .emit = genX(upload_sol),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_ps)(struct brw_context *brw)
{
   UNUSED const struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   const struct brw_stage_state *stage_state = &brw->wm.base;

   brw_batch_emit(brw, GENX(3DSTATE_PS), ps) {
      /* Initialize the execution mask with VMask.  Otherwise, derivatives
       * are incorrect for subspans where some of the pixels are unlit.  We
       * believe the bit just didn't take effect in previous generations.
       */
      ps.VectorMaskEnable = GEN_GEN >= 8;

      ps.SamplerCount =
         DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);

      /* BRW_NEW_FS_PROG_DATA */
      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests
       * disabling prefetching of binding tables in A0 and B0 steppings.
       * TODO: Revisit this workaround on C0 stepping.
       */
      ps.BindingTableEntryCount = GEN_GEN == 11 ?
                                  0 :
                                  prog_data->base.binding_table.size_bytes / 4;

      if (prog_data->base.use_alt_mode)
         ps.FloatingPointMode = Alternate;

#if GEN_IS_HASWELL
      /* Haswell requires the sample mask to be set in this packet as well
       * as in 3DSTATE_SAMPLE_MASK; the values should match.
       */

      /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
      ps.SampleMask = genX(determine_sample_mask(brw));
#endif

      /* 3DSTATE_PS expects the number of threads per PSD, which is always
       * 64 for pre-Gen11 and 128 for Gen11+.  On Gen11+, a programmed value
       * of k implies 2(k+1) threads.  It implicitly scales for different GT
       * levels (which have some # of PSDs).
       *
       * In Gen8 the format is U8-2 whereas in Gen9+ it is U9-1.
       */
#if GEN_GEN >= 9
      ps.MaximumNumberofThreadsPerPSD = 64 - 1;
#elif GEN_GEN >= 8
      ps.MaximumNumberofThreadsPerPSD = 64 - 2;
#else
      ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
#endif

      if (prog_data->base.nr_params > 0 ||
          prog_data->base.ubo_ranges[0].length > 0)
         ps.PushConstantEnable = true;

#if GEN_GEN < 8
      /* From the IVB PRM, volume 2 part 1, page 287:
       *
       *    "This bit is inserted in the PS payload header and made available
       *     to the DataPort (either via the message header or via header
       *     bypass) to indicate that oMask data (one or two phases) is
       *     included in Render Target Write messages. If present, the oMask
       *     data is used to mask off samples."
       */
      ps.oMaskPresenttoRenderTarget = prog_data->uses_omask;

      /* The hardware wedges if you have this bit set but don't turn on any
       * dual source blend factors.
       *
       * BRW_NEW_FS_PROG_DATA | _NEW_COLOR
       */
      ps.DualSourceBlendEnable = prog_data->dual_src_blend &&
                                 (ctx->Color.BlendEnabled & 1) &&
                                 ctx->Color.Blend[0]._UsesDualSrc;

      /* BRW_NEW_FS_PROG_DATA */
      ps.AttributeEnable = (prog_data->num_varying_inputs != 0);
#endif

      /* From the documentation for this packet:
       *
       *    "If the PS kernel does not need the Position XY Offsets to
       *     compute a Position Value, then this field should be programmed
       *     to POSOFFSET_NONE."
       *
       *    "SW Recommendation: If the PS kernel needs the Position Offsets
       *     to compute a Position XY value, this field should match Position
       *     ZW Interpolation Mode to ensure a consistent position.xyzw
       *     computation."
       *
       * We only require XY sample offsets, so this recommendation doesn't
       * look useful at the moment.  We might need it in the future.
       */
      if (prog_data->uses_pos_offset)
         ps.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         ps.PositionXYOffsetSelect = POSOFFSET_NONE;

      ps._8PixelDispatchEnable = prog_data->dispatch_8;
      ps._16PixelDispatchEnable = prog_data->dispatch_16;
      ps._32PixelDispatchEnable = prog_data->dispatch_32;

      /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
       *
       *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
       *     Dispatch must not be enabled for PER_PIXEL dispatch mode."
       *
       * Since 16x MSAA was first introduced on SKL, we don't need to apply
       * the workaround on any older hardware.
       *
       * BRW_NEW_NUM_SAMPLES
       */
      if (GEN_GEN >= 9 && !prog_data->persample_dispatch &&
          brw->num_samples == 16) {
         assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
         ps._32PixelDispatchEnable = false;
      }

      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
      ps.DispatchGRFStartRegisterForConstantSetupData1 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);

      ps.KernelStartPointer0 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 0);
      ps.KernelStartPointer1 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 1);
      ps.KernelStartPointer2 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 2);

      if (prog_data->base.total_scratch) {
         ps.ScratchSpaceBasePointer =
            rw_32_bo(stage_state->scratch_bo,
                     ffs(stage_state->per_thread_scratch) - 11);
      }
   }
}

static const struct brw_tracked_state genX(ps_state) = {
   .dirty = {
      .mesa  = _NEW_MULTISAMPLE |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR :
                              0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA |
               (GEN_GEN >= 9 ? BRW_NEW_NUM_SAMPLES : 0),
   },
   .emit = genX(upload_ps),
};
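
/* Worked example of the scratch space encoding used above (illustrative):
 * the low bits of ScratchSpaceBasePointer hold the per-thread size as a
 * power of two, so per_thread_scratch = 2 KB yields ffs(2048) - 11 =
 * 12 - 11 = 1, and the 2 MB maximum yields ffs(1 << 21) - 11 = 11.
 */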
/* ---------------------------------------------------------------------- */

static void
genX(upload_hs_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->tcs.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   /* BRW_NEW_TCS_PROG_DATA */
   struct brw_tcs_prog_data *tcs_prog_data =
      brw_tcs_prog_data(stage_prog_data);

   if (!tcs_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs);
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs) {
         INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);

         hs.InstanceCount = tcs_prog_data->instances - 1;
         hs.IncludeVertexHandles = true;

         hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      }
   }
}

static const struct brw_tracked_state genX(hs_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TCS_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_hs_state),
};
static void
genX(upload_ds_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tes.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(stage_prog_data);
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   if (!tes_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_DS), ds);
   } else {
      assert(GEN_GEN < 11 ||
             vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);

      brw_batch_emit(brw, GENX(3DSTATE_DS), ds) {
         INIT_THREAD_DISPATCH_FIELDS(ds, Patch);

         ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
         ds.ComputeWCoordinateEnable =
            tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

#if GEN_GEN >= 8
         if (vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8)
            ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
         ds.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;
#endif
      }
   }
}

static const struct brw_tracked_state genX(ds_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_ds_state),
};
/* ---------------------------------------------------------------------- */

static void
upload_te_state(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(brw->tes.base.prog_data);

   if (active) {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te) {
         te.Partitioning = tes_prog_data->partitioning;
         te.OutputTopology = tes_prog_data->output_topology;
         te.TEDomain = tes_prog_data->domain;
         te.TEEnable = true;
         te.MaximumTessellationFactorOdd = 63.0;
         te.MaximumTessellationFactorNotOdd = 64.0;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te);
   }
}

static const struct brw_tracked_state genX(te_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = upload_te_state,
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_tes_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tes.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tep = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tes.base.prog_data;
   gen6_upload_push_constants(brw, tep, prog_data, stage_state);
}

static const struct brw_tracked_state genX(tes_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_tes_push_constants),
};
static void
genX(upload_tcs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tcs.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tcp = brw->programs[MESA_SHADER_TESS_CTRL];

   /* BRW_NEW_TCS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tcs.base.prog_data;

   gen6_upload_push_constants(brw, tcp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(tcs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_DEFAULT_TESS_LEVELS |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TCS_PROG_DATA,
   },
   .emit = genX(upload_tcs_push_constants),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_cs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      struct brw_cs_prog_data *cs_prog_data =
         brw_cs_prog_data(brw->cs.base.prog_data);

      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
      brw_upload_cs_push_constants(brw, cp, cs_prog_data, stage_state);
   }
}

const struct brw_tracked_state genX(cs_push_constants) = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_push_constants),
};
/**
 * Creates a new CS constant buffer reflecting the current CS program's
 * constants, if needed by the CS program.
 */
static void
genX(upload_cs_pull_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct brw_program *cp =
      (struct brw_program *) brw->programs[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->cs.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &cp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state genX(cs_pull_constants) = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_pull_constants),
};
static void
genX(upload_cs_state)(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(
      brw, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t), 64,
      &offset);

   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1,
         RELOC_WRITE);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);

   /* The MEDIA_VFE_STATE documentation for Gen8+ says:
    *
    *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
    *     the only bits that are changed are scoreboard related: Scoreboard
    *     Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
    *     these scoreboard related states, a MEDIA_STATE_FLUSH is
    *     sufficient."
    *
    * Earlier generations say "MI_FLUSH" instead of "stalling PIPE_CONTROL",
    * but MI_FLUSH isn't really a thing, so we assume they meant PIPE_CONTROL.
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);

   brw_batch_emit(brw, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         uint32_t per_thread_scratch_value;

         if (GEN_GEN >= 8) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 11;
         } else if (GEN_IS_HASWELL) {
            /* Haswell's Per Thread Scratch Space is in the range [0, 10]
             * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 12;
         } else {
            /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
             * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
             */
            per_thread_scratch_value =
               stage_state->per_thread_scratch / 1024 - 1;
         }
         vfe.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         vfe.PerThreadScratchSpace = per_thread_scratch_value;
      }

      /* If brw->screen->subslice_total is greater than one, then
       * devinfo->max_cs_threads stores the number of threads per sub-slice;
       * thus we need to multiply that number by subslices to get the actual
       * maximum number of threads.  The -1 is because the HW has a bias of
       * 1 (it would not make sense to say the maximum number of threads is
       * 0).
       */
      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      vfe.MaximumNumberofThreads = devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN >= 8 ? 2 : 0;
#if GEN_GEN < 11
      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GEN_GEN < 9
      vfe.BypassGatewayControl = BypassingOpenGatewayCloseGatewayprotocol;
#endif

      /* We are uploading duplicated copies of push constant uniforms for
       * each thread.  Although the local id data needs to vary per thread,
       * it won't change for other uniform data.  Unfortunately this
       * duplication is required for gen7.  As of Haswell, this duplication
       * can be avoided, but this older mechanism with duplicated data
       * continues to work.
       *
       * FINISHME: As of Haswell, we could make use of the
       * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length"
       * field to only store one copy of uniform data.
       *
       * FINISHME: Broadwell adds a new alternative "Indirect Payload
       * Storage" which is described in the GPGPU_WALKER command and in the
       * Broadwell PRM Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline
       * => Mode of Operations => GPGPU Mode => Indirect Payload Storage.
       *
       * Note: The constant data is built in brw_upload_cs_push_constants().
       */
      vfe.URBEntryAllocationSize = GEN_GEN >= 8 ? 2 : 0;

      const uint32_t vfe_curbe_allocation =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
   }

   if (cs_prog_data->push.total.size > 0) {
      brw_batch_emit(brw, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength =
            ALIGN(cs_prog_data->push.total.size, 64);
         curbe.CURBEDataStartAddress = stage_state->push_const_offset;
      }
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);
   const struct GENX(INTERFACE_DESCRIPTOR_DATA) idd = {
      .KernelStartPointer = brw->cs.base.prog_offset,
      .SamplerStatePointer = stage_state->sampler_offset,
      .SamplerCount = DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4),
      .BindingTablePointer = stage_state->bind_bo_offset,
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
      .SharedLocalMemorySize = encode_slm_size(GEN_GEN,
                                               prog_data->total_shared),
      .BarrierEnable = cs_prog_data->uses_barrier,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif
   };

   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(brw, desc, &idd);

   brw_batch_emit(brw, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress = offset;
   }
}

static const struct brw_tracked_state genX(cs_state) = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_SAMPLER_STATE_TABLE |
             BRW_NEW_SURFACES,
   },
   .emit = genX(upload_cs_state)
};
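
/* Worked CURBE sizing example for MEDIA_VFE_STATE above (illustrative
 * numbers): with 2 per-thread registers, 8 threads, and 1 cross-thread
 * register, the allocation is ALIGN(2 * 8 + 1, 2) = 18 registers, rounded
 * up to an even count by the ALIGN.
 */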
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_raster)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const bool flip_y = ctx->DrawBuffer->FlipY;

   /* _NEW_POLYGON */
   const struct gl_polygon_attrib *polygon = &ctx->Polygon;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   brw_batch_emit(brw, GENX(3DSTATE_RASTER), raster) {
      if (brw->polygon_front_bit != flip_y)
         raster.FrontWinding = CounterClockwise;

      if (polygon->CullFlag) {
         switch (polygon->CullFaceMode) {
         case GL_FRONT:
            raster.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            raster.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            raster.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         raster.CullMode = CULLMODE_NONE;
      }

      raster.SmoothPointEnable = point->SmoothFlag;

      raster.DXMultisampleRasterizationEnable =
         _mesa_is_multisample_enabled(ctx);

      raster.GlobalDepthOffsetEnableSolid = polygon->OffsetFill;
      raster.GlobalDepthOffsetEnableWireframe = polygon->OffsetLine;
      raster.GlobalDepthOffsetEnablePoint = polygon->OffsetPoint;

      switch (polygon->FrontMode) {
      case GL_FILL:
         raster.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (polygon->BackMode) {
      case GL_FILL:
         raster.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      /* _NEW_LINE */
      raster.AntialiasingEnable = ctx->Line.SmoothFlag;

#if GEN_GEN == 10
      /* _NEW_BUFFERS
       * Antialiasing Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
       */
      const bool multisampled_fbo =
         _mesa_geometric_samples(ctx->DrawBuffer) > 1;
      if (multisampled_fbo)
         raster.AntialiasingEnable = false;
#endif

      /* _NEW_SCISSOR */
      raster.ScissorRectangleEnable = ctx->Scissor.EnableFlags;

      /* _NEW_TRANSFORM */
      if (!(ctx->Transform.DepthClampNear &&
            ctx->Transform.DepthClampFar)) {
#if GEN_GEN >= 9
         raster.ViewportZFarClipTestEnable = true;
         raster.ViewportZNearClipTestEnable = true;
#else
         raster.ViewportZClipTestEnable = true;
#endif
      }

      /* BRW_NEW_CONSERVATIVE_RASTERIZATION */
#if GEN_GEN >= 9
      raster.ConservativeRasterizationEnable =
         ctx->IntelConservativeRasterization;
#endif

      raster.GlobalDepthOffsetClamp = polygon->OffsetClamp;
      raster.GlobalDepthOffsetScale = polygon->OffsetFactor;

      raster.GlobalDepthOffsetConstant = polygon->OffsetUnits * 2;
   }
}

static const struct brw_tracked_state genX(raster_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LINE |
               _NEW_MULTISAMPLE |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_SCISSOR |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_raster),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_ps_extra)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;

   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_PS_EXTRA), psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = prog_data->uses_kill;
      psx.AttributeEnable = prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = prog_data->persample_dispatch;

      /* _NEW_MULTISAMPLE | BRW_NEW_CONSERVATIVE_RASTERIZATION */
      if (prog_data->uses_sample_mask) {
#if GEN_GEN >= 9
         if (prog_data->post_depth_coverage)
            psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
         else if (prog_data->inner_coverage &&
                  ctx->IntelConservativeRasterization)
            psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
         else
            psx.InputCoverageMaskState = ICMS_NORMAL;
#else
         psx.PixelShaderUsesInputCoverageMask = true;
#endif
      }

      psx.oMaskPresenttoRenderTarget = prog_data->uses_omask;
#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = prog_data->computed_stencil;
#endif

      /* The stricter cross-primitive coherency guarantees that the hardware
       * gives us with the "Accesses UAV" bit set for at least one shader
       * stage and the "UAV coherency required" bit set on the 3DPRIMITIVE
       * command are redundant within the current image, atomic counter and
       * SSBO GL APIs, which all have very loose ordering and coherency
       * requirements and generally rely on the application to insert
       * explicit barriers when a shader invocation is expected to see the
       * memory writes performed by the invocations of some previous
       * primitive.  Regardless of the value of "UAV coherency required",
       * the "Accesses UAV" bits will implicitly cause a DC flush (useless
       * in most cases) when the lowermost stage with the bit set finishes
       * execution.
       *
       * It would be nice to disable it, but in some cases we can't because
       * on Gen8+ it also has an influence on rasterization via the PS
       * UAV-only signal (which could be set independently from the
       * coherency mechanism in the 3DSTATE_WM command on Gen7), and because
       * in some cases it will determine whether the hardware skips
       * execution of the fragment shader or not via the
       * ThreadDispatchEnable signal.  However, if we know that
       * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
       * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not set, it shouldn't make any
       * difference, so we may just disable it here.
       *
       * Gen8 hardware tries to compute ThreadDispatchEnable for us but
       * doesn't take into account KillPixels when no depth or stencil
       * writes are enabled.  In order for occlusion queries to work
       * correctly with no attachments, we need to force-enable it here.
       *
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS |
       * _NEW_COLOR
       */
      if ((prog_data->has_side_effects || prog_data->uses_kill) &&
          !brw_color_buffer_write_enabled(brw))
         psx.PixelShaderHasUAV = true;
   }
}

const struct brw_tracked_state genX(ps_extra) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS | _NEW_COLOR,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_ps_extra),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_ps_blend)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   const bool buffer0_is_integer = ctx->DrawBuffer->_IntegerBuffers & 0x1;

   /* _NEW_COLOR */
   struct gl_colorbuffer_attrib *color = &ctx->Color;

   brw_batch_emit(brw, GENX(3DSTATE_PS_BLEND), pb) {
      /* BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS | _NEW_COLOR */
      pb.HasWriteableRT = brw_color_buffer_write_enabled(brw);

      bool alpha_to_one = false;

      if (!buffer0_is_integer) {
         /* _NEW_MULTISAMPLE */

         if (_mesa_is_multisample_enabled(ctx)) {
            pb.AlphaToCoverageEnable = ctx->Multisample.SampleAlphaToCoverage;
            alpha_to_one = ctx->Multisample.SampleAlphaToOne;
         }

         pb.AlphaTestEnable = color->AlphaEnabled;
      }

      /* Used for implementing the following bit of GL_EXT_texture_integer:
       *
       *    "Per-fragment operations that require floating-point color
       *     components, including multisample alpha operations, alpha test,
       *     blending, and dithering, have no effect when the corresponding
       *     colors are written to an integer color buffer."
       *
       * The OpenGL specification 3.3 (page 196), section 4.1.3 says:
       *
       *    "If drawbuffer zero is not NONE and the buffer it references has
       *     an integer format, the SAMPLE_ALPHA_TO_COVERAGE and
       *     SAMPLE_ALPHA_TO_ONE operations are skipped."
       */
      if (rb && !buffer0_is_integer && (color->BlendEnabled & 1)) {
         GLenum eqRGB = color->Blend[0].EquationRGB;
         GLenum eqA = color->Blend[0].EquationA;
         GLenum srcRGB = color->Blend[0].SrcRGB;
         GLenum dstRGB = color->Blend[0].DstRGB;
         GLenum srcA = color->Blend[0].SrcA;
         GLenum dstA = color->Blend[0].DstA;

         if (eqRGB == GL_MIN || eqRGB == GL_MAX)
            srcRGB = dstRGB = GL_ONE;

         if (eqA == GL_MIN || eqA == GL_MAX)
            srcA = dstA = GL_ONE;

         /* Due to hardware limitations, the destination may have information
          * in an alpha channel even when the format specifies no alpha
          * channel.  In order to avoid getting any incorrect blending due to
          * that alpha channel, coerce the blend factors to values that will
          * not read the alpha channel, but will instead use the correct
          * implicit value for alpha.
          */
         if (!_mesa_base_format_has_channel(rb->_BaseFormat,
                                            GL_TEXTURE_ALPHA_TYPE)) {
            srcRGB = brw_fix_xRGB_alpha(srcRGB);
            srcA = brw_fix_xRGB_alpha(srcA);
            dstRGB = brw_fix_xRGB_alpha(dstRGB);
            dstA = brw_fix_xRGB_alpha(dstA);
         }

         /* Alpha to One doesn't work with Dual Color Blending.  Override
          * SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO.
          */
         if (alpha_to_one && color->Blend[0]._UsesDualSrc) {
            srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
            srcA = fix_dual_blend_alpha_to_one(srcA);
            dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
            dstA = fix_dual_blend_alpha_to_one(dstA);
         }

         pb.ColorBufferBlendEnable = true;
         pb.SourceAlphaBlendFactor = brw_translate_blend_factor(srcA);
         pb.DestinationAlphaBlendFactor = brw_translate_blend_factor(dstA);
         pb.SourceBlendFactor = brw_translate_blend_factor(srcRGB);
         pb.DestinationBlendFactor = brw_translate_blend_factor(dstRGB);

         pb.IndependentAlphaBlendEnable =
            srcA != srcRGB || dstA != dstRGB || eqA != eqRGB;
      }
   }
}

static const struct brw_tracked_state genX(ps_blend) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_ps_blend)
};
#endif
4850 genX(emit_vf_topology
)(struct brw_context
*brw
)
4852 brw_batch_emit(brw
, GENX(3DSTATE_VF_TOPOLOGY
), vftopo
) {
4853 vftopo
.PrimitiveTopologyType
= brw
->primitive
;
4857 static const struct brw_tracked_state
genX(vf_topology
) = {
4860 .brw
= BRW_NEW_BLORP
|
4863 .emit
= genX(emit_vf_topology
),
/* ---------------------------------------------------------------------- */

static void
genX(emit_mi_report_perf_count)(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t offset_in_bytes,
                                uint32_t report_id)
{
   brw_batch_emit(brw, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = ggtt_bo(bo, offset_in_bytes);
      mi_rpc.ReportID = report_id;
   }
}
/* ---------------------------------------------------------------------- */

/**
 * Emit a 3DSTATE_SAMPLER_STATE_POINTERS_{VS,HS,GS,DS,PS} packet.
 */
static void
genX(emit_sampler_state_pointers_xs)(MAYBE_UNUSED struct brw_context *brw,
                                     MAYBE_UNUSED struct brw_stage_state *stage_state)
{
#if GEN_GEN >= 7
   static const uint16_t packet_headers[] = {
      [MESA_SHADER_VERTEX] = 43,
      [MESA_SHADER_TESS_CTRL] = 44,
      [MESA_SHADER_TESS_EVAL] = 45,
      [MESA_SHADER_GEOMETRY] = 46,
      [MESA_SHADER_FRAGMENT] = 47,
   };

   /* Ivybridge requires a workaround flush before VS packets. */
   if (GEN_GEN == 7 && !GEN_IS_HASWELL &&
       stage_state->stage == MESA_SHADER_VERTEX) {
      gen7_emit_vs_workaround_flush(brw);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
      ptr._3DCommandSubOpcode = packet_headers[stage_state->stage];
      ptr.PointertoVSSamplerState = stage_state->sampler_offset;
   }
#endif
}
static bool
has_component(mesa_format format, int i)
{
   if (_mesa_is_format_color_format(format))
      return _mesa_format_has_color_component(format, i);

   /* Depth and stencil have only one component. */
   return i == 0;
}
/**
 * Upload SAMPLER_BORDER_COLOR_STATE.
 */
static void
genX(upload_default_color)(struct brw_context *brw,
                           const struct gl_sampler_object *sampler,
                           MAYBE_UNUSED mesa_format format,
                           GLenum base_format,
                           bool is_integer_format, bool is_stencil_sampling,
                           uint32_t *sdc_offset)
{
   union gl_color_union color;

   switch (base_format) {
   case GL_DEPTH_COMPONENT:
      /* GL specs that border color for depth textures is taken from the
       * R channel, while the hardware uses A.  Spam R into all the
       * channels for safety.
       */
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_ALPHA:
      color.ui[0] = 0;
      color.ui[1] = 0;
      color.ui[2] = 0;
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   case GL_INTENSITY:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_LUMINANCE:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = float_as_int(1.0);
      break;
   case GL_LUMINANCE_ALPHA:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   default:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[1];
      color.ui[2] = sampler->BorderColor.ui[2];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   }

   /* In some cases we use an RGBA surface format for GL RGB textures,
    * where we've initialized the A channel to 1.0.  We also have to set
    * the border color alpha to 1.0 in that case.
    */
   if (base_format == GL_RGB)
      color.ui[3] = float_as_int(1.0);

   int alignment = 32;
   if (GEN_GEN >= 8) {
      alignment = 64;
   } else if (GEN_IS_HASWELL && (is_integer_format || is_stencil_sampling)) {
      alignment = 512;
   }

   uint32_t *sdc = brw_state_batch(
      brw, GENX(SAMPLER_BORDER_COLOR_STATE_length) * sizeof(uint32_t),
      alignment, sdc_offset);

   struct GENX(SAMPLER_BORDER_COLOR_STATE) state = { 0 };

#define ASSIGN(dst, src) \
   do {                  \
      dst = src;         \
   } while (0)

#define ASSIGNu16(dst, src) \
   do {                     \
      dst = (uint16_t)src;  \
   } while (0)

#define ASSIGNu8(dst, src) \
   do {                    \
      dst = (uint8_t)src;  \
   } while (0)

#define BORDER_COLOR_ATTR(macro, _color_type, src)           \
   macro(state.BorderColor ## _color_type ## Red, src[0]);   \
   macro(state.BorderColor ## _color_type ## Green, src[1]); \
   macro(state.BorderColor ## _color_type ## Blue, src[2]);  \
   macro(state.BorderColor ## _color_type ## Alpha, src[3]);

#if GEN_GEN >= 8
   /* On Broadwell, the border color is represented as four 32-bit floats,
    * integers, or unsigned values, interpreted according to the surface
    * format.  This matches the sampler->BorderColor union exactly; just
    * memcpy the values.
    */
   BORDER_COLOR_ATTR(ASSIGN, 32bit, color.ui);
#elif GEN_IS_HASWELL
   if (is_integer_format || is_stencil_sampling) {
      bool stencil = format == MESA_FORMAT_S_UINT8 || is_stencil_sampling;
      const int bits_per_channel =
         _mesa_get_format_bits(format, stencil ? GL_STENCIL_BITS : GL_RED_BITS);

      /* From the Haswell PRM, "Command Reference: Structures", Page 36:
       *
       *    "If any color channel is missing from the surface format,
       *     corresponding border color should be programmed as zero and if
       *     alpha channel is missing, corresponding Alpha border color
       *     should be programmed as 1."
       */
      unsigned c[4] = { 0, 0, 0, 1 };
      for (int i = 0; i < 4; i++) {
         if (has_component(format, i))
            c[i] = color.ui[i];
      }

      switch (bits_per_channel) {
      case 8:
         /* Copy RGBA in order. */
         BORDER_COLOR_ATTR(ASSIGNu8, 8bit, c);
         break;
      case 10:
         /* R10G10B10A2_UINT is treated like a 16-bit format. */
      case 16:
         BORDER_COLOR_ATTR(ASSIGNu16, 16bit, c);
         break;
      case 32:
         if (base_format == GL_RG) {
            /* Careful inspection of the tables reveals that for RG32
             * formats, the green channel needs to go where blue normally
             * belongs.
             */
            state.BorderColor32bitRed = c[0];
            state.BorderColor32bitBlue = c[1];
            state.BorderColor32bitAlpha = 1;
         } else {
            /* Copy RGBA in order. */
            BORDER_COLOR_ATTR(ASSIGN, 32bit, c);
         }
         break;
      default:
         assert(!"Invalid number of bits per channel in integer format.");
      }
   } else {
      BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
   }
#elif GEN_GEN == 5 || GEN_GEN == 6
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_UBYTE, Unorm, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_USHORT, Unorm16, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_SHORT, Snorm16, color.f);

#define MESA_FLOAT_TO_HALF(dst, src) \
   dst = _mesa_float_to_half(src);

   BORDER_COLOR_ATTR(MESA_FLOAT_TO_HALF, Float16, color.f);

#undef MESA_FLOAT_TO_HALF

   state.BorderColorSnorm8Red   = state.BorderColorSnorm16Red >> 8;
   state.BorderColorSnorm8Green = state.BorderColorSnorm16Green >> 8;
   state.BorderColorSnorm8Blue  = state.BorderColorSnorm16Blue >> 8;
   state.BorderColorSnorm8Alpha = state.BorderColorSnorm16Alpha >> 8;

   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#elif GEN_GEN == 4
   BORDER_COLOR_ATTR(ASSIGN, , color.f);
#else
   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#endif

#undef BORDER_COLOR_ATTR

   GENX(SAMPLER_BORDER_COLOR_STATE_pack)(brw, sdc, &state);
}
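
/* Illustrative example of the Haswell integer path above: for a
 * MESA_FORMAT_R10G10B10A2_UINT border color, _mesa_get_format_bits()
 * reports 10 bits for the red channel, so the case 10 fall-through
 * programs the color through the 16-bit fields, as the comment in the
 * switch explains.
 */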
static uint32_t
translate_wrap_mode(GLenum wrap, MAYBE_UNUSED bool using_nearest)
{
   switch (wrap) {
   case GL_REPEAT:
      return TCM_WRAP;
   case GL_CLAMP:
#if GEN_GEN >= 8
      /* GL_CLAMP is the weird mode where coordinates are clamped to
       * [0.0, 1.0], so linear filtering of coordinates outside of
       * [0.0, 1.0] gives you half edge texel value and half border
       * color.
       *
       * Gen8+ supports this natively.
       */
      return TCM_HALF_BORDER;
#else
      /* On Gen4-7.5, we clamp the coordinates in the fragment shader
       * and set clamp_border here, which gets the result desired.
       * We just use clamp(_to_edge) for nearest, because for nearest
       * clamping to 1.0 gives border color instead of the desired
       * texture edge.
       */
      if (using_nearest)
         return TCM_CLAMP;
      else
         return TCM_CLAMP_BORDER;
#endif
   case GL_CLAMP_TO_EDGE:
      return TCM_CLAMP;
   case GL_CLAMP_TO_BORDER:
      return TCM_CLAMP_BORDER;
   case GL_MIRRORED_REPEAT:
      return TCM_MIRROR;
   case GL_MIRROR_CLAMP_TO_EDGE:
      return TCM_MIRROR_ONCE;
   default:
      return TCM_WRAP;
   }
}

/**
 * Return true if the given wrap mode requires the border color to exist.
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
#if GEN_GEN >= 8
   return wrap_mode == TCM_CLAMP_BORDER ||
          wrap_mode == TCM_HALF_BORDER;
#else
   return wrap_mode == TCM_CLAMP_BORDER;
#endif
}
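
/* Summary of the GL_CLAMP handling above: Gen8+ maps it natively to
 * TCM_HALF_BORDER, while older parts use TCM_CLAMP for nearest filtering
 * and TCM_CLAMP_BORDER (plus coordinate clamping in the fragment shader)
 * for linear filtering.
 */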
/**
 * Sets the sampler state for a single unit based off of the sampler key
 * entry.
 */
static void
genX(update_sampler_state)(struct brw_context *brw,
                           GLenum target, bool tex_cube_map_seamless,
                           GLfloat tex_unit_lod_bias,
                           mesa_format format, GLenum base_format,
                           const struct gl_texture_object *texObj,
                           const struct gl_sampler_object *sampler,
                           uint32_t *sampler_state)
{
   struct GENX(SAMPLER_STATE) samp_st = { 0 };

   /* Select min and mip filters. */
   switch (sampler->MinFilter) {
   case GL_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_NEAREST_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_LINEAR_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_NEAREST_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   case GL_LINEAR_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   default:
      unreachable("not reached");
   }

   /* Select mag filter. */
   samp_st.MagModeFilter = sampler->MagFilter == GL_LINEAR ?
      MAPFILTER_LINEAR : MAPFILTER_NEAREST;

   /* Enable anisotropic filtering if desired. */
   samp_st.MaximumAnisotropy = RATIO21;

   if (sampler->MaxAnisotropy > 1.0f) {
      if (samp_st.MinModeFilter == MAPFILTER_LINEAR)
         samp_st.MinModeFilter = MAPFILTER_ANISOTROPIC;
      if (samp_st.MagModeFilter == MAPFILTER_LINEAR)
         samp_st.MagModeFilter = MAPFILTER_ANISOTROPIC;

      if (sampler->MaxAnisotropy > 2.0f) {
         samp_st.MaximumAnisotropy =
            MIN2((sampler->MaxAnisotropy - 2) / 2, RATIO161);
      }
   }

   /* Set address rounding bits if not using nearest filtering. */
   if (samp_st.MinModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMinFilterRoundingEnable = true;
      samp_st.VAddressMinFilterRoundingEnable = true;
      samp_st.RAddressMinFilterRoundingEnable = true;
   }

   if (samp_st.MagModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMagFilterRoundingEnable = true;
      samp_st.VAddressMagFilterRoundingEnable = true;
      samp_st.RAddressMagFilterRoundingEnable = true;
   }

   bool either_nearest =
      sampler->MinFilter == GL_NEAREST || sampler->MagFilter == GL_NEAREST;
   unsigned wrap_s = translate_wrap_mode(sampler->WrapS, either_nearest);
   unsigned wrap_t = translate_wrap_mode(sampler->WrapT, either_nearest);
   unsigned wrap_r = translate_wrap_mode(sampler->WrapR, either_nearest);

   if (target == GL_TEXTURE_CUBE_MAP ||
       target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      /* Cube maps must use the same wrap mode for all three coordinate
       * dimensions.  Prior to Haswell, only CUBE and CLAMP are valid.
       *
       * Ivybridge and Baytrail seem to have problems with CUBE mode and
       * integer formats.  Fall back to CLAMP for now.
       */
      if ((tex_cube_map_seamless || sampler->CubeMapSeamless) &&
          !(GEN_GEN == 7 && !GEN_IS_HASWELL && texObj->_IsIntegerFormat)) {
         wrap_s = TCM_CUBE;
         wrap_t = TCM_CUBE;
         wrap_r = TCM_CUBE;
      } else {
         wrap_s = TCM_CLAMP;
         wrap_t = TCM_CLAMP;
         wrap_r = TCM_CLAMP;
      }
   } else if (target == GL_TEXTURE_1D) {
      /* There's a bug in 1D texture sampling - it actually pays
       * attention to the wrap_t value, though it should not.
       * Override the wrap_t value here to GL_REPEAT to keep
       * any nonexistent border pixels from floating in.
       */
      wrap_t = TCM_WRAP;
   }

   samp_st.TCXAddressControlMode = wrap_s;
   samp_st.TCYAddressControlMode = wrap_t;
   samp_st.TCZAddressControlMode = wrap_r;

   /* Set shadow function. */
   samp_st.ShadowFunction =
      sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB ?
      intel_translate_shadow_compare_func(sampler->CompareFunc) : 0;

#if GEN_GEN >= 7
   /* Set the anisotropic filter algorithm. */
   samp_st.AnisotropicAlgorithm =
      samp_st.MinModeFilter == MAPFILTER_ANISOTROPIC ?
      EWAApproximation : LEGACY;
#endif

#if GEN_GEN >= 6
   samp_st.NonnormalizedCoordinateEnable = target == GL_TEXTURE_RECTANGLE;
#endif

   const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
   samp_st.MinLOD = CLAMP(sampler->MinLod, 0, hw_max_lod);
   samp_st.MaxLOD = CLAMP(sampler->MaxLod, 0, hw_max_lod);
   samp_st.TextureLODBias =
      CLAMP(tex_unit_lod_bias + sampler->LodBias, -16, 15);

#if GEN_GEN == 6
   samp_st.BaseMipLevel =
      CLAMP(texObj->MinLevel + texObj->BaseLevel, 0, hw_max_lod);
   samp_st.MinandMagStateNotEqual =
      samp_st.MinModeFilter != samp_st.MagModeFilter;
#endif

   /* Upload the border color if necessary.  If not, just point it at
    * offset 0 (the start of the batch) - the color should be ignored,
    * but that address won't fault in case something reads it anyway.
    */
   uint32_t border_color_offset = 0;
   if (wrap_mode_needs_border_color(wrap_s) ||
       wrap_mode_needs_border_color(wrap_t) ||
       wrap_mode_needs_border_color(wrap_r)) {
      genX(upload_default_color)(brw, sampler, format, base_format,
                                 texObj->_IsIntegerFormat,
                                 texObj->StencilSampling,
                                 &border_color_offset);
   }
#if GEN_GEN < 6
   samp_st.BorderColorPointer =
      ro_bo(brw->batch.state.bo, border_color_offset);
#else
   samp_st.BorderColorPointer = border_color_offset;
#endif

#if GEN_GEN >= 8
   samp_st.LODPreClampMode = CLAMP_MODE_OGL;
#else
   samp_st.LODPreClampEnable = true;
#endif

   GENX(SAMPLER_STATE_pack)(brw, sampler_state, &samp_st);
}
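
/* Illustrative LOD bias arithmetic for the CLAMP above: a texture unit
 * bias of 1.5 plus a sampler LodBias of 0.25 programs TextureLODBias =
 * 1.75; anything outside [-16, 15] is clamped to the hardware range.
 */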
static void
update_sampler_state(struct brw_context *brw,
                     int unit,
                     uint32_t *sampler_state)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   const struct gl_texture_object *texObj = texUnit->_Current;
   const struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   /* These don't use samplers at all. */
   if (texObj->Target == GL_TEXTURE_BUFFER)
      return;

   struct gl_texture_image *firstImage = texObj->Image[0][texObj->BaseLevel];
   genX(update_sampler_state)(brw, texObj->Target,
                              ctx->Texture.CubeMapSeamless,
                              texUnit->LodBias,
                              firstImage->TexFormat, firstImage->_BaseFormat,
                              texObj, sampler,
                              sampler_state);
}
static void
genX(upload_sampler_state_table)(struct brw_context *brw,
                                 struct gl_program *prog,
                                 struct brw_stage_state *stage_state)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t sampler_count = stage_state->sampler_count;

   GLbitfield SamplersUsed = prog->SamplersUsed;

   if (sampler_count == 0)
      return;

   /* SAMPLER_STATE is 4 DWords on all platforms. */
   const int dwords = GENX(SAMPLER_STATE_length);
   const int size_in_bytes = dwords * sizeof(uint32_t);

   uint32_t *sampler_state = brw_state_batch(brw,
                                             sampler_count * size_in_bytes,
                                             32, &stage_state->sampler_offset);
   /* memset(sampler_state, 0, sampler_count * size_in_bytes); */

   for (unsigned s = 0; s < sampler_count; s++) {
      if (SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         if (ctx->Texture.Unit[unit]._Current) {
            update_sampler_state(brw, unit, sampler_state);
         }
      }

      sampler_state += dwords;
   }

   if (GEN_GEN >= 7 && stage_state->stage != MESA_SHADER_COMPUTE) {
      /* Emit a 3DSTATE_SAMPLER_STATE_POINTERS_XS packet. */
      genX(emit_sampler_state_pointers_xs)(brw, stage_state);
   } else {
      /* Flag that the sampler state table pointer has changed; later atoms
       * will handle it.
       */
      brw->ctx.NewDriverState |= BRW_NEW_SAMPLER_STATE_TABLE;
   }
}
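
/* Table-walk example (illustrative): with SamplersUsed = 0x5, entries 0
 * and 2 are filled from their GL texture units while entry 1 is skipped;
 * sampler_state still advances by the fixed 4-DWord SAMPLER_STATE stride
 * for every slot, used or not.
 */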
static void
genX(upload_fs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
   genX(upload_sampler_state_table)(brw, fs, &brw->wm.base);
}

static const struct brw_tracked_state genX(fs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_fs_samplers),
};

static void
genX(upload_vs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
   genX(upload_sampler_state_table)(brw, vs, &brw->vs.base);
}

static const struct brw_tracked_state genX(vs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTEX_PROGRAM,
   },
   .emit = genX(upload_vs_samplers),
};

#if GEN_GEN >= 6
static void
genX(upload_gs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
   if (!gs)
      return;

   genX(upload_sampler_state_table)(brw, gs, &brw->gs.base);
}

static const struct brw_tracked_state genX(gs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_GEOMETRY_PROGRAM,
   },
   .emit = genX(upload_gs_samplers),
};
#endif

#if GEN_GEN >= 7
static void
genX(upload_tcs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   if (!tcs)
      return;

   genX(upload_sampler_state_table)(brw, tcs, &brw->tcs.base);
}

static const struct brw_tracked_state genX(tcs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tcs_samplers),
};

static void
genX(upload_tes_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
   if (!tes)
      return;

   genX(upload_sampler_state_table)(brw, tes, &brw->tes.base);
}

static const struct brw_tracked_state genX(tes_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tes_samplers),
};

static void
genX(upload_cs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
   if (!cs)
      return;

   genX(upload_sampler_state_table)(brw, cs, &brw->cs.base);
}

const struct brw_tracked_state genX(cs_samplers) = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = genX(upload_cs_samplers),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN <= 5

static void genX(upload_blend_constant_color)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_COLOR), blend_cc) {
      blend_cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      blend_cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      blend_cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      blend_cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];
   }
}

static const struct brw_tracked_state genX(blend_constant_color) = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_BLORP,
   },
   .emit = genX(upload_blend_constant_color)
};

#endif

/* ---------------------------------------------------------------------- */
void
genX(init_atoms)(struct brw_context *brw)
{
#if GEN_GEN < 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Once all the programs are done, we know how large urb entry
       * sizes need to be and can decide if we need to change the urb
       * layout.
       */
      &brw_curbe_offsets,
      &brw_recalculate_urb_fence,

      &genX(cc_vp),
      &genX(color_calc_state),

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_wm_pull_constants,
      &brw_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),

      /* These set up state for brw_psp_urb_cbs */
      &genX(wm_state),
      &genX(sf_clip_viewport),
      &genX(sf_state),
      &genX(vs_state), /* always required, enabled or not */
      &genX(clip_state),
      &genX(gs_state),

      /* Command packets:
       */
      &brw_binding_table_pointers,
      &genX(blend_constant_color),

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &brw_psp_urb_cbs,

      &genX(drawing_rect),
      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &brw_constant_buffer
   };
#elif GEN_GEN == 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(sf_clip_viewport),

      /* Command packets: */

      &genX(cc_vp),
      &genX(viewport_state), /* must do after *_vp stages */

      &gen6_urb,
      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_state */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &gen6_sol_surface,
      &brw_vs_binding_table,
      &gen6_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(gs_samplers),
      &gen6_sampler_state,
      &genX(multisample_state),

      &genX(vs_state),
      &genX(gs_state),
      &genX(clip_state),
      &genX(sf_state),
      &genX(wm_state),

      &genX(scissor_state),

      &gen6_binding_table_pointers,

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),
   };
#elif GEN_GEN == 7
   static const struct brw_tracked_state *render_atoms[] =
   {
      /* Command packets: */

      &genX(cc_vp),
      &genX(sf_clip_viewport),

      &gen7_l3_state,
      &gen7_push_constant_space,
      &gen7_urb,
      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(tcs_samplers),
      &genX(tes_samplers),
      &genX(gs_samplers),
      &genX(multisample_state),

      &genX(vs_state),
      &genX(hs_state),
      &genX(te_state),
      &genX(ds_state),
      &genX(gs_state),
      &genX(sol_state),
      &genX(clip_state),
      &genX(sbe_state),
      &genX(sf_state),
      &genX(wm_state),
      &genX(ps_state),

      &genX(scissor_state),

      &gen7_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &haswell_cut_index,
   };
#elif GEN_GEN >= 8
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(cc_vp),
      &genX(sf_clip_viewport),

      &gen7_l3_state,
      &gen7_push_constant_space,
      &gen7_urb,
      &genX(blend_state),
      &genX(color_calc_state),

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(tcs_samplers),
      &genX(tes_samplers),
      &genX(gs_samplers),
      &genX(multisample_state),

      &genX(vs_state),
      &genX(hs_state),
      &genX(te_state),
      &genX(ds_state),
      &genX(gs_state),
      &genX(sol_state),
      &genX(clip_state),
      &genX(raster_state),
      &genX(sbe_state),
      &genX(sf_state),
      &genX(ps_blend),
      &genX(ps_extra),
      &genX(ps_state),
      &genX(depth_stencil_state),
      &genX(wm_state),

      &genX(scissor_state),

      &gen7_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &genX(vf_topology),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &genX(cut_index),
      &gen8_pma_fix,
   };
#endif

   STATIC_ASSERT(ARRAY_SIZE(render_atoms) <= ARRAY_SIZE(brw->render_atoms));
   brw_copy_pipeline_atoms(brw, BRW_RENDER_PIPELINE,
                           render_atoms, ARRAY_SIZE(render_atoms));

#if GEN_GEN >= 7
   static const struct brw_tracked_state *compute_atoms[] =
   {
      &gen7_l3_state,
      &brw_cs_image_surfaces,
      &genX(cs_push_constants),
      &genX(cs_pull_constants),
      &brw_cs_ubo_surfaces,
      &brw_cs_texture_surfaces,
      &brw_cs_work_groups_surface,
      &genX(cs_samplers),
      &genX(cs_state),
   };

   STATIC_ASSERT(ARRAY_SIZE(compute_atoms) <= ARRAY_SIZE(brw->compute_atoms));
   brw_copy_pipeline_atoms(brw, BRW_COMPUTE_PIPELINE,
                           compute_atoms, ARRAY_SIZE(compute_atoms));
#endif

   brw->vtbl.emit_mi_report_perf_count = genX(emit_mi_report_perf_count);
}
);