/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "dev/gen_device_info.h"
#include "common/gen_sample_positions.h"
#include "genxml/gen_macros.h"
#include "common/gen_guardband.h"

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/state.h"

#include "genX_boilerplate.h"

#include "brw_context.h"
#include "brw_multisample_state.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"

#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/glformats.h"
#include "main/samplerobj.h"
#include "main/shaderapi.h"
#include "main/stencil.h"
#include "main/transformfeedback.h"
#include "main/varray.h"
#include "main/viewport.h"
#include "util/half_float.h"
#if GEN_GEN == 4
static struct brw_address
KSP(struct brw_context *brw, uint32_t offset)
{
   return ro_bo(brw->cache.bo, offset);
}
#else
static uint32_t
KSP(UNUSED struct brw_context *brw, uint32_t offset)
{
   return offset;
}
#endif
#if GEN_GEN >= 7
static void
emit_lrm(struct brw_context *brw, uint32_t reg, struct brw_address addr)
{
   brw_batch_emit(brw, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = addr;
   }
}
#endif
#if GEN_GEN == 7
static void
emit_lri(struct brw_context *brw, uint32_t reg, uint32_t imm)
{
   brw_batch_emit(brw, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = imm;
   }
}
#endif
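/* Usage sketch (illustrative, not from this file): these helpers wrap the
 * MI_LOAD_REGISTER_* commands.  For a hypothetical MMIO register offset
 * reg_offset:
 *
 *    emit_lri(brw, reg_offset, 1);                  // reg = 1 (immediate)
 *    emit_lrm(brw, reg_offset, ro_bo(bo, offset));  // reg = *(bo + offset)
 */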
/**
 * Polygon stipple packet
 */
static void
genX(upload_polygon_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
      /* Polygon stipple is provided in OpenGL order, i.e. bottom
       * row first.  If we're rendering to a window (i.e. the
       * default frame buffer object, 0), then we need to invert
       * it to match our pixel layout.  But if we're rendering
       * to a FBO (i.e. any named frame buffer object), we *don't*
       * need to invert - we already match the layout.
       */
      if (ctx->DrawBuffer->FlipY) {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[31 - i]; /* invert */
      } else {
         for (unsigned i = 0; i < 32; i++)
            poly.PatternRow[i] = ctx->PolygonStipple[i];
      }
   }
}
static const struct brw_tracked_state genX(polygon_stipple) = {
   .dirty = {
      .mesa = _NEW_POLYGON |
              _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple),
};
/**
 * Polygon stipple offset packet
 */
static void
genX(upload_polygon_stipple_offset)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_OFFSET), poly) {
      /* _NEW_BUFFERS
       *
       * If we're drawing to a system window we have to invert the Y axis
       * in order to match the OpenGL pixel coordinate system, and our
       * offset must be matched to the window position.  If we're drawing
       * to a user-created FBO then our native pixel coordinate system
       * works just fine, and there's no window system to worry about.
       */
      if (ctx->DrawBuffer->FlipY) {
         poly.PolygonStippleYOffset =
            (32 - (_mesa_geometric_height(ctx->DrawBuffer) & 31)) & 31;
      }
   }
}
static const struct brw_tracked_state genX(polygon_stipple_offset) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_POLYGON,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_polygon_stipple_offset),
};
/**
 * Line stipple packet
 */
static void
genX(upload_line_stipple)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.StippleFlag)
      return;

   brw_batch_emit(brw, GENX(3DSTATE_LINE_STIPPLE), line) {
      line.LineStipplePattern = ctx->Line.StipplePattern;

      line.LineStippleInverseRepeatCount = 1.0f / ctx->Line.StippleFactor;
      line.LineStippleRepeatCount = ctx->Line.StippleFactor;
   }
}
static const struct brw_tracked_state genX(line_stipple) = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_line_stipple),
};
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void
genX(upload_drawing_rect)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   const unsigned int fb_width = _mesa_geometric_width(fb);
   const unsigned int fb_height = _mesa_geometric_height(fb);

   brw_batch_emit(brw, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = fb_width - 1;
      rect.ClippedDrawingRectangleYMax = fb_height - 1;
   }
}
static const struct brw_tracked_state genX(drawing_rect) = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_drawing_rect),
};
static uint32_t *
genX(emit_vertex_buffer_state)(struct brw_context *brw,
                               uint32_t *dw,
                               unsigned buffer_nr,
                               struct brw_bo *bo,
                               unsigned start_offset,
                               UNUSED unsigned end_offset,
                               unsigned stride,
                               UNUSED unsigned step_rate)
{
   struct GENX(VERTEX_BUFFER_STATE) buf_state = {
      .VertexBufferIndex = buffer_nr,
      .BufferPitch = stride,

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two vertex buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict vertex buffers to the low 32 bits of
       * the address space.
       */
      .BufferStartingAddress = ro_32_bo(bo, start_offset),
#if GEN_GEN >= 8
      .BufferSize = end_offset - start_offset,
#endif

#if GEN_GEN >= 7
      .AddressModifyEnable = true,
#endif

#if GEN_GEN < 8
      .BufferAccessType = step_rate ? INSTANCEDATA : VERTEXDATA,
      .InstanceDataStepRate = step_rate,
#if GEN_GEN >= 5
      .EndAddress = ro_bo(bo, end_offset - 1),
#endif
#endif

#if GEN_GEN == 7
      .MOCS = GEN7_MOCS_L3,
#endif
   };

   GENX(VERTEX_BUFFER_STATE_pack)(brw, dw, &buf_state);
   return dw + GENX(VERTEX_BUFFER_STATE_length);
}
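/* A minimal usage sketch (assumed, for illustration): the caller owns the
 * DWord pointer and chains consecutive buffer states into one
 * 3DSTATE_VERTEX_BUFFERS packet, where bo0/bo1, size0/size1 and
 * stride0/stride1 are hypothetical:
 *
 *    uint32_t *dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_BUFFERS),
 *                                   1 + GENX(VERTEX_BUFFER_STATE_length) * 2);
 *    dw = genX(emit_vertex_buffer_state)(brw, dw, 0, bo0, 0, size0, stride0, 0);
 *    dw = genX(emit_vertex_buffer_state)(brw, dw, 1, bo1, 0, size1, stride1, 0);
 *
 * Each call packs one VERTEX_BUFFER_STATE and returns the advanced pointer.
 */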
static bool
is_passthru_format(uint32_t format)
{
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return true;
   default:
      return false;
   }
}
static int
uploads_needed(uint32_t format,
               bool is_dual_slot)
{
   if (!is_passthru_format(format))
      return 1;

   if (is_dual_slot)
      return 2;

   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
   case ISL_FORMAT_R64G64_PASSTHRU:
      return 1;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return 2;
   default:
      unreachable("not reached");
   }
}
/*
 * Returns the format that we are finally going to use when uploading a vertex
 * element.  It will only change if we are using *64*PASSTHRU formats, as for
 * gen < 8 they need to be split into two *32*FLOAT formats.
 *
 * @upload points in which upload we are.  Valid values are [0,1]
 */
static uint32_t
downsize_format_if_needed(uint32_t format,
                          int upload)
{
   assert(upload == 0 || upload == 1);

   if (!is_passthru_format(format))
      return format;

   /* ISL_FORMAT_R64_PASSTHRU and ISL_FORMAT_R64G64_PASSTHRU with an upload ==
    * 1 means that we have been forced to do 2 uploads for a size <= 2.  This
    * happens with gen < 8 and dvec3 or dvec4 vertex shader input
    * variables.  In those cases, we return ISL_FORMAT_R32_FLOAT as a way of
    * flagging that we want to fill with zeroes this second forced upload.
    */
   switch (format) {
   case ISL_FORMAT_R64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32_FLOAT;
   case ISL_FORMAT_R64G64B64_PASSTHRU:
      return upload == 0 ? ISL_FORMAT_R32G32B32A32_FLOAT
                         : ISL_FORMAT_R32G32_FLOAT;
   case ISL_FORMAT_R64G64B64A64_PASSTHRU:
      return ISL_FORMAT_R32G32B32A32_FLOAT;
   default:
      unreachable("not reached");
   }
}
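/* Worked example (illustrative): a dvec3 input uses
 * ISL_FORMAT_R64G64B64_PASSTHRU, which uploads_needed() reports as two
 * uploads on gen < 8.  downsize_format_if_needed() then yields:
 *
 *    upload 0: ISL_FORMAT_R32G32B32A32_FLOAT  (x and y doubles, 128 bits)
 *    upload 1: ISL_FORMAT_R32G32_FLOAT        (z double, 64 bits)
 *
 * together covering the 192 bits of the three doubles.
 */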
/*
 * Returns the number of components associated with a format that is used on
 * a 64 to 32 format split.  See downsize_format()
 */
static int
upload_format_size(uint32_t upload_format)
{
   switch (upload_format) {
   case ISL_FORMAT_R32_FLOAT:
      /* downsized_format has returned this one in order to flag that we are
       * performing a second upload which we want to have filled with
       * zeroes.  This happens with gen < 8, a size <= 2, and dvec3 or dvec4
       * vertex shader input variables.
       */
      return 0;
   case ISL_FORMAT_R32G32_FLOAT:
      return 2;
   case ISL_FORMAT_R32G32B32A32_FLOAT:
      return 4;
   default:
      unreachable("not reached");
   }
}
static UNUSED uint16_t
pinned_bo_high_bits(struct brw_bo *bo)
{
   return (bo->kflags & EXEC_OBJECT_PINNED) ? bo->gtt_offset >> 32ull : 0;
}
/* The VF cache designers apparently cut corners, and made the cache key's
 * <VertexBufferIndex, Memory Address> tuple only consider the bottom 32 bits
 * of the address.  If you happen to have two vertex buffers which get placed
 * exactly 4 GiB apart and use them in back-to-back draw calls, you can get
 * collisions.  (These collisions can happen within a single batch.)
 *
 * In the soft-pin world, we'd like to assign addresses up front, and never
 * move buffers.  So, we need to do a VF cache invalidate if the buffer for
 * a particular VB slot has different [48:32] address bits than the last one.
 *
 * In the relocation world, we have no idea what the addresses will be, so
 * we can't apply this workaround.  Instead, we tell the kernel to move it
 * to the low 4GB regardless.
 *
 * This HW issue is gone on Gen11+.
 */
static void
vf_invalidate_for_vb_48bit_transitions(struct brw_context *brw)
{
#if GEN_GEN >= 8 && GEN_GEN < 11
   bool need_invalidate = false;

   for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
      uint16_t high_bits = pinned_bo_high_bits(brw->vb.buffers[i].bo);

      if (high_bits != brw->vb.last_bo_high_bits[i]) {
         need_invalidate = true;
         brw->vb.last_bo_high_bits[i] = high_bits;
      }
   }

   if (brw->draw.draw_params_bo) {
      uint16_t high_bits = pinned_bo_high_bits(brw->draw.draw_params_bo);

      if (brw->vb.last_bo_high_bits[brw->vb.nr_buffers] != high_bits) {
         need_invalidate = true;
         brw->vb.last_bo_high_bits[brw->vb.nr_buffers] = high_bits;
      }
   }

   if (brw->draw.derived_draw_params_bo) {
      uint16_t high_bits = pinned_bo_high_bits(brw->draw.derived_draw_params_bo);

      if (brw->vb.last_bo_high_bits[brw->vb.nr_buffers + 1] != high_bits) {
         need_invalidate = true;
         brw->vb.last_bo_high_bits[brw->vb.nr_buffers + 1] = high_bits;
      }
   }

   if (need_invalidate) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                       PIPE_CONTROL_CS_STALL);
   }
#endif
}
static void
vf_invalidate_for_ib_48bit_transition(struct brw_context *brw)
{
#if GEN_GEN >= 8 && GEN_GEN < 11
   uint16_t high_bits = pinned_bo_high_bits(brw->ib.bo);

   if (high_bits != brw->ib.last_bo_high_bits) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_VF_CACHE_INVALIDATE);
      brw->ib.last_bo_high_bits = high_bits;
   }
#endif
}
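/* Illustration of the aliasing the two invalidate helpers above guard
 * against (not driver code): two buffer addresses exactly 4 GiB apart have
 * identical low 32 bits, so the VF cache would treat them as the same
 * address even though the high bits differ:
 *
 *    uint64_t a = 0x000100000000ull;      // hypothetical pinned offsets
 *    uint64_t b = a + (1ull << 32);       // exactly 4 GiB later
 *    assert((uint32_t)a == (uint32_t)b);  // same VF cache key
 *    assert((a >> 32) != (b >> 32));      // different high bits, so flush
 */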
static void
genX(emit_vertices)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

#if GEN_GEN < 6
   brw_emit_query_begin(brw);
#endif

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

#if GEN_GEN >= 8
   struct gl_context *ctx = &brw->ctx;
   const bool uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
                                ctx->Polygon.BackMode != GL_FILL);

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid) {
      unsigned vue = brw->vb.nr_enabled;

      /* The element for the edge flags must always be last, so we have to
       * insert the SGVS before it in that case.
       */
      if (uses_edge_flag) {
         assert(vue > 0);
         vue--;
      }

      WARN_ONCE(vue >= 33,
                "Trying to insert VID/IID past 33rd vertex element, "
                "need to reorder the vertex attributes.");

      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs) {
         if (vs_prog_data->uses_vertexid) {
            vfs.VertexIDEnable = true;
            vfs.VertexIDComponentNumber = 2;
            vfs.VertexIDElementOffset = vue;
         }

         if (vs_prog_data->uses_instanceid) {
            vfs.InstanceIDEnable = true;
            vfs.InstanceIDComponentNumber = 3;
            vfs.InstanceIDElementOffset = vue;
         }
      }

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = true;
         vfi.VertexElementIndex = vue;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_VF_SGVS), vfs);
   }
#endif
   const bool uses_draw_params =
      vs_prog_data->uses_firstvertex ||
      vs_prog_data->uses_baseinstance;

   const bool uses_derived_draw_params =
      vs_prog_data->uses_drawid ||
      vs_prog_data->uses_is_indexed_draw;

   const bool needs_sgvs_element = (uses_draw_params ||
                                    vs_prog_data->uses_instanceid ||
                                    vs_prog_data->uses_vertexid);

   unsigned nr_elements =
      brw->vb.nr_enabled + needs_sgvs_element + uses_derived_draw_params;
#if GEN_GEN < 8
   /* If any of the formats of vb.enabled needs more than one upload, we need
    * to add it to nr_elements
    */
   for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_array_attributes *glattrib = input->glattrib;
      uint32_t format = brw_get_vertex_surface_type(brw, &glattrib->Format);

      if (uploads_needed(format, input->is_dual_slot) > 1)
         nr_elements++;
   }
#endif
   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (nr_elements == 0) {
      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                           1 + GENX(VERTEX_ELEMENT_STATE_length));
      struct GENX(VERTEX_ELEMENT_STATE) elem = {
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_1_FP,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem);
      return;
   }
   /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + uses_derived_draw_params;

   vf_invalidate_for_vb_48bit_transitions(brw);

   if (nr_buffers) {
      assert(nr_buffers <= (GEN_GEN >= 6 ? 33 : 17));

      dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_BUFFERS),
                           1 + GENX(VERTEX_BUFFER_STATE_length) * nr_buffers);

      for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
         const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         /* Prior to Haswell and Bay Trail we have to use 4-component formats
          * to fake 3-component ones.  In particular, we do this for
          * half-float and 8 and 16-bit integer formats.  This means that the
          * vertex element may poke over the end of the buffer by 2 bytes.
          */
         const unsigned padding =
            (GEN_GEN <= 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail) * 2;
         const unsigned end = buffer->offset + buffer->size + padding;
         dw = genX(emit_vertex_buffer_state)(brw, dw, i, buffer->bo,
                                             buffer->offset, end,
                                             buffer->stride,
                                             buffer->step_rate);
      }

      if (uses_draw_params) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers,
                                             brw->draw.draw_params_bo,
                                             brw->draw.draw_params_offset,
                                             brw->draw.draw_params_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }

      if (uses_derived_draw_params) {
         dw = genX(emit_vertex_buffer_state)(brw, dw, brw->vb.nr_buffers + 1,
                                             brw->draw.derived_draw_params_bo,
                                             brw->draw.derived_draw_params_offset,
                                             brw->draw.derived_draw_params_bo->size,
                                             0 /* stride */,
                                             0 /* step rate */);
      }
   }
   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
#if GEN_GEN >= 6
   assert(nr_elements <= 34);
   const struct brw_vertex_element *gen6_edgeflag_input = NULL;
#else
   assert(nr_elements <= 18);
#endif

   dw = brw_batch_emitn(brw, GENX(3DSTATE_VERTEX_ELEMENTS),
                        1 + GENX(VERTEX_ELEMENT_STATE_length) * nr_elements);

   unsigned i;
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_array_attributes *glattrib = input->glattrib;
      uint32_t format = brw_get_vertex_surface_type(brw, &glattrib->Format);
      uint32_t comp0 = VFCOMP_STORE_SRC;
      uint32_t comp1 = VFCOMP_STORE_SRC;
      uint32_t comp2 = VFCOMP_STORE_SRC;
      uint32_t comp3 = VFCOMP_STORE_SRC;
      const unsigned num_uploads = GEN_GEN < 8 ?
         uploads_needed(format, input->is_dual_slot) : 1;

#if GEN_GEN >= 8
      /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
       * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
       * element which has edge flag enabled."
       */
      assert(!(is_passthru_format(format) && uses_edge_flag));
#endif

      /* The gen4 driver expects edgeflag to come in as a float, and passes
       * that float on to the tests in the clipper.  Mesa's current vertex
       * attribute value for EdgeFlag is stored as a float, which works out.
       * glEdgeFlagPointer, on the other hand, gives us an unnormalized
       * integer ubyte.  Just rewrite that to convert to a float.
       *
       * Gen6+ passes edgeflag as sideband along with the vertex, instead
       * of in the VUE.  We have to upload it sideband as the last vertex
       * element according to the B-Spec.
       */
#if GEN_GEN >= 6
      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         gen6_edgeflag_input = input;
         continue;
      }
#endif

      for (unsigned c = 0; c < num_uploads; c++) {
         const uint32_t upload_format = GEN_GEN >= 8 ? format :
            downsize_format_if_needed(format, c);
         /* If we need more than one upload, the offset stride would be 128
          * bits (16 bytes), as for previous uploads we are using the full
          * entry. */
         const unsigned offset = input->offset + c * 16;

         const struct gl_array_attributes *glattrib = input->glattrib;
         const int size = (GEN_GEN < 8 && is_passthru_format(format)) ?
            upload_format_size(upload_format) : glattrib->Format.Size;

         switch (size) {
         case 0: comp0 = VFCOMP_STORE_0; /* fallthrough */
         case 1: comp1 = VFCOMP_STORE_0; /* fallthrough */
         case 2: comp2 = VFCOMP_STORE_0; /* fallthrough */
         case 3:
            if (GEN_GEN >= 8 && glattrib->Format.Doubles) {
               comp3 = VFCOMP_STORE_0;
            } else if (glattrib->Format.Integer) {
               comp3 = VFCOMP_STORE_1_INT;
            } else {
               comp3 = VFCOMP_STORE_1_FP;
            }
            break;
         }

#if GEN_GEN >= 8
         /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
          *
          *     "When SourceElementFormat is set to one of the *64*_PASSTHRU
          *     formats, 64-bit components are stored in the URB without any
          *     conversion. In this case, vertex elements must be written as 128
          *     or 256 bits, with VFCOMP_STORE_0 being used to pad the output as
          *     required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
          *     component into the URB, Component 1 must be specified as
          *     VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE) in
          *     order to output a 128-bit vertex element, or Components 1-3 must
          *     be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
          *     element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
          *     to be specified as VFCOMP_STORE_0 in order to output a 256-bit
          *     vertex element."
          */
         if (glattrib->Format.Doubles && !input->is_dual_slot) {
            /* Store vertex elements which correspond to double and dvec2 vertex
             * shader inputs as 128-bit vertex elements, instead of 256-bits.
             */
            comp2 = VFCOMP_NOSTORE;
            comp3 = VFCOMP_NOSTORE;
         }
#endif

         struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
            .VertexBufferIndex = input->buffer,
            .Valid = true,
            .SourceElementFormat = upload_format,
            .SourceElementOffset = offset,
            .Component0Control = comp0,
            .Component1Control = comp1,
            .Component2Control = comp2,
            .Component3Control = comp3,
#if GEN_GEN < 5
            .DestinationElementOffset = i * 4,
#endif
         };

         GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
         dw += GENX(VERTEX_ELEMENT_STATE_length);
      }
   }
   if (needs_sgvs_element) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

#if GEN_GEN >= 8
      if (uses_draw_params) {
         elem_state.VertexBufferIndex = brw->vb.nr_buffers;
         elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }
#else
      elem_state.VertexBufferIndex = brw->vb.nr_buffers;
      elem_state.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
      if (uses_draw_params) {
         elem_state.Component0Control = VFCOMP_STORE_SRC;
         elem_state.Component1Control = VFCOMP_STORE_SRC;
      }

      if (vs_prog_data->uses_vertexid)
         elem_state.Component2Control = VFCOMP_STORE_VID;

      if (vs_prog_data->uses_instanceid)
         elem_state.Component3Control = VFCOMP_STORE_IID;
#endif

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
   if (uses_derived_draw_params) {
      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = brw->vb.nr_buffers + 1,
         .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_SRC,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#if GEN_GEN < 5
         .DestinationElementOffset = i * 4,
#endif
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#if GEN_GEN >= 6
   if (gen6_edgeflag_input) {
      const struct gl_array_attributes *glattrib = gen6_edgeflag_input->glattrib;
      const uint32_t format = brw_get_vertex_surface_type(brw, &glattrib->Format);

      struct GENX(VERTEX_ELEMENT_STATE) elem_state = {
         .Valid = true,
         .VertexBufferIndex = gen6_edgeflag_input->buffer,
         .EdgeFlagEnable = true,
         .SourceElementFormat = format,
         .SourceElementOffset = gen6_edgeflag_input->offset,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };

      GENX(VERTEX_ELEMENT_STATE_pack)(brw, dw, &elem_state);
      dw += GENX(VERTEX_ELEMENT_STATE_length);
   }
#endif
#if GEN_GEN >= 8
   for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
      const struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
      unsigned element_index;

      /* The edge flag element is reordered to be the last one in the code
       * above so we need to compensate for that in the element indices used
       * below.
       */
      if (input == gen6_edgeflag_input)
         element_index = nr_elements - 1;
      else
         element_index = j++;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element_index;
         vfi.InstancingEnable = buffer->step_rate != 0;
         vfi.InstanceDataStepRate = buffer->step_rate;
      }
   }

   if (vs_prog_data->uses_drawid) {
      const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;

      brw_batch_emit(brw, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = element;
      }
   }
#endif
}
static const struct brw_tracked_state genX(vertices) = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(emit_vertices),
};
static void
genX(emit_index_buffer)(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   vf_invalidate_for_ib_48bit_transition(brw);

   brw_batch_emit(brw, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if GEN_GEN < 8 && !GEN_IS_HASWELL
      assert(brw->ib.enable_cut_index == brw->prim_restart.enable_cut_index);
      ib.CutIndexEnable = brw->ib.enable_cut_index;
#endif
      ib.IndexFormat = brw_get_index_type(index_buffer->index_size);

      /* The VF cache designers apparently cut corners, and made the cache
       * only consider the bottom 32 bits of memory addresses.  If you happen
       * to have two index buffers which get placed exactly 4 GiB apart and
       * use them in back-to-back draw calls, you can get collisions.  To work
       * around this problem, we restrict index buffers to the low 32 bits of
       * the address space.
       */
      ib.BufferStartingAddress = ro_32_bo(brw->ib.bo, 0);
#if GEN_GEN >= 8
      ib.MOCS = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      ib.BufferSize = brw->ib.size;
#else
      ib.BufferEndingAddress = ro_bo(brw->ib.bo, brw->ib.size - 1);
#endif
   }
}
static const struct brw_tracked_state genX(index_buffer) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(emit_index_buffer),
};
#if GEN_IS_HASWELL || GEN_GEN >= 8
static void
genX(upload_cut_index)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_VF), vf) {
      if (ctx->Array._PrimitiveRestart && brw->ib.ib) {
         vf.IndexedDrawCutIndexEnable = true;
         vf.CutIndex = _mesa_primitive_restart_index(ctx, brw->ib.index_size);
      }
   }
}

const struct brw_tracked_state genX(cut_index) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM,
      .brw   = BRW_NEW_INDEX_BUFFER,
   },
   .emit = genX(upload_cut_index),
};
#endif
static void
genX(upload_vf_statistics)(struct brw_context *brw)
{
   brw_batch_emit(brw, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = true;
   }
}

const struct brw_tracked_state genX(vf_statistics) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP | BRW_NEW_CONTEXT,
   },
   .emit = genX(upload_vf_statistics),
};
#if GEN_GEN >= 6
/**
 * Determine the appropriate attribute override value to store into the
 * 3DSTATE_SF structure for a given fragment shader attribute.  The attribute
 * override value contains two pieces of information: the location of the
 * attribute in the VUE (relative to urb_entry_read_offset, see below), and a
 * flag indicating whether to "swizzle" the attribute based on the direction
 * the triangle is facing.
 *
 * If an attribute is "swizzled", then the given VUE location is used for
 * front-facing triangles, and the VUE location that immediately follows is
 * used for back-facing triangles.  We use this to implement the mapping from
 * gl_FrontColor/gl_BackColor to gl_Color.
 *
 * urb_entry_read_offset is the offset into the VUE at which the SF unit is
 * being instructed to begin reading attribute data.  It can be set to a
 * nonzero value to prevent the SF unit from wasting time reading elements of
 * the VUE that are not needed by the fragment shader.  It is measured in
 * 256-bit increments.
 */
static void
genX(get_attr_override)(struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr,
                        const struct brw_vue_map *vue_map,
                        int urb_entry_read_offset, int fs_attr,
                        bool two_side_color, uint32_t *max_source_attr)
{
   /* Find the VUE slot for this attribute. */
   int slot = vue_map->varying_to_slot[fs_attr];

   /* Viewport and Layer are stored in the VUE header.  We need to override
    * them to zero if earlier stages didn't write them, as GL requires that
    * they read back as zero when not explicitly set.
    */
   if (fs_attr == VARYING_SLOT_VIEWPORT || fs_attr == VARYING_SLOT_LAYER) {
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideW = true;
      attr->ConstantSource = CONST_0000;

      if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
         attr->ComponentOverrideY = true;
      if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
         attr->ComponentOverrideZ = true;

      return;
   }
   /* If there was only a back color written but not front, use back
    * as the color instead of undefined
    */
   if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
   if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
      slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
   if (slot == -1) {
      /* This attribute does not exist in the VUE--that means that the vertex
       * shader did not write to it.  This means that either:
       *
       * (a) This attribute is a texture coordinate, and it is going to be
       * replaced with point coordinates (as a consequence of a call to
       * glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)), so the
       * hardware will ignore whatever attribute override we supply.
       *
       * (b) This attribute is read by the fragment shader but not written by
       * the vertex shader, so its value is undefined.  Therefore the
       * attribute override we supply doesn't matter.
       *
       * (c) This attribute is gl_PrimitiveID, and it wasn't written by the
       * previous shader stage.
       *
       * Note that we don't have to worry about the cases where the attribute
       * is gl_PointCoord or is undergoing point sprite coordinate
       * replacement, because in those cases, this function isn't called.
       *
       * In case (c), we need to program the attribute overrides so that the
       * primitive ID will be stored in this slot.  In every other case, the
       * attribute override we supply doesn't matter.  So just go ahead and
       * program primitive ID in every case.
       */
      attr->ComponentOverrideW = true;
      attr->ComponentOverrideX = true;
      attr->ComponentOverrideY = true;
      attr->ComponentOverrideZ = true;
      attr->ConstantSource = PRIM_ID;
      return;
   }
   /* Compute the location of the attribute relative to urb_entry_read_offset.
    * Each increment of urb_entry_read_offset represents a 256-bit value, so
    * it counts for two 128-bit VUE slots.
    */
   int source_attr = slot - 2 * urb_entry_read_offset;
   assert(source_attr >= 0 && source_attr < 32);

   /* If we are doing two-sided color, and the VUE slot following this one
    * represents a back-facing color, then we need to instruct the SF unit to
    * do back-facing swizzling.
    */
   bool swizzling = two_side_color &&
      ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
       (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
        vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1));

   /* Update max_source_attr.  If swizzling, the SF will read this slot + 1. */
   if (*max_source_attr < source_attr + swizzling)
      *max_source_attr = source_attr + swizzling;

   attr->SourceAttribute = source_attr;
   if (swizzling)
      attr->SwizzleSelect = INPUTATTR_FACING;
}
static void
genX(calculate_attr_overrides)(const struct brw_context *brw,
                               struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr_overrides,
                               uint32_t *point_sprite_enables,
                               uint32_t *urb_entry_read_length,
                               uint32_t *urb_entry_read_offset)
{
   const struct gl_context *ctx = &brw->ctx;
   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   uint32_t max_source_attr = 0;

   *point_sprite_enables = 0;

   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   int first_slot =
      brw_compute_first_urb_slot_required(fp->info.inputs_read,
                                          &brw->vue_map_geom_out);

   /* Each URB offset packs two varying slots */
   assert(first_slot % 2 == 0);
   *urb_entry_read_offset = first_slot / 2;
1084 * description of dw10 Point Sprite Texture Coordinate Enable:
1086 * "This field must be programmed to zero when non-point primitives
1089 * The SandyBridge PRM doesn't explicitly say that point sprite enables
1090 * must be programmed to zero when rendering non-point primitives, but
1091 * the IvyBridge PRM does, and if we don't, we get garbage.
1093 * This is not required on Haswell, as the hardware ignores this state
1094 * when drawing non-points -- although we do still need to be careful to
1095 * correctly set the attr overrides.
1098 * BRW_NEW_PRIMITIVE | BRW_NEW_GS_PROG_DATA | BRW_NEW_TES_PROG_DATA
1100 bool drawing_points
= brw_is_drawing_points(brw
);
1102 for (int attr
= 0; attr
< VARYING_SLOT_MAX
; attr
++) {
1103 int input_index
= wm_prog_data
->urb_setup
[attr
];
1105 if (input_index
< 0)
1109 bool point_sprite
= false;
1110 if (drawing_points
) {
1111 if (point
->PointSprite
&&
1112 (attr
>= VARYING_SLOT_TEX0
&& attr
<= VARYING_SLOT_TEX7
) &&
1113 (point
->CoordReplace
& (1u << (attr
- VARYING_SLOT_TEX0
)))) {
1114 point_sprite
= true;
1117 if (attr
== VARYING_SLOT_PNTC
)
1118 point_sprite
= true;
1121 *point_sprite_enables
|= (1 << input_index
);
      /* BRW_NEW_VUE_MAP_GEOM_OUT | _NEW_LIGHT | _NEW_PROGRAM */
      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attribute = { 0 };

      if (!point_sprite) {
         genX(get_attr_override)(&attribute,
                                 &brw->vue_map_geom_out,
                                 *urb_entry_read_offset, attr,
                                 _mesa_vertex_program_two_side_enabled(ctx),
                                 &max_source_attr);
      }

      /* The hardware can only do the overrides on 16 overrides at a
       * time, and the other up to 16 have to be lined up so that the
       * input index = the output index.  We'll need to do some
       * tweaking to make sure that's the case.
       */
      if (input_index < 16)
         attr_overrides[input_index] = attribute;
      else
         assert(attribute.SourceAttribute == input_index);
   }
   /* From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    * "This field should be set to the minimum length required to read the
    * maximum source attribute.  The maximum source attribute is indicated
    * by the maximum value of the enabled Attribute # Source Attribute if
    * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    * enable is not set.
    * read_length = ceiling((max_source_attr + 1) / 2)
    *
    * [errata] Corruption/Hang possible if length programmed larger than
    * recommended"
    *
    * Similar text exists for Ivy Bridge.
    */
   *urb_entry_read_length = DIV_ROUND_UP(max_source_attr + 1, 2);
}
#endif
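/* Worked example (illustrative): if the highest enabled source attribute is
 * slot 4 and it is a swizzled front/back color pair, max_source_attr becomes
 * 5, so read_length = DIV_ROUND_UP(5 + 1, 2) = 3 pairs of 128-bit VUE slots.
 */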
/* ----------------------------------------------------------------------- */

#if GEN_GEN >= 8
typedef struct GENX(3DSTATE_WM_DEPTH_STENCIL) DEPTH_STENCIL_GENXML;
#elif GEN_GEN >= 6
typedef struct GENX(DEPTH_STENCIL_STATE) DEPTH_STENCIL_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) DEPTH_STENCIL_GENXML;
#endif
static void
set_depth_stencil_bits(struct brw_context *brw, DEPTH_STENCIL_GENXML *ds)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* _NEW_DEPTH */
   struct gl_depthbuffer_attrib *depth = &ctx->Depth;

   /* _NEW_STENCIL */
   struct gl_stencil_attrib *stencil = &ctx->Stencil;
   const int b = stencil->_BackFace;

   if (depth->Test && depth_irb) {
      ds->DepthTestEnable = true;
      ds->DepthBufferWriteEnable = brw_depth_writes_enabled(brw);
      ds->DepthTestFunction = intel_translate_compare_func(depth->Func);
   }

   if (brw->stencil_enabled) {
      ds->StencilTestEnable = true;
      ds->StencilWriteMask = stencil->WriteMask[0] & 0xff;
      ds->StencilTestMask = stencil->ValueMask[0] & 0xff;

      ds->StencilTestFunction =
         intel_translate_compare_func(stencil->Function[0]);
      ds->StencilFailOp =
         intel_translate_stencil_op(stencil->FailFunc[0]);
      ds->StencilPassDepthPassOp =
         intel_translate_stencil_op(stencil->ZPassFunc[0]);
      ds->StencilPassDepthFailOp =
         intel_translate_stencil_op(stencil->ZFailFunc[0]);

      ds->StencilBufferWriteEnable = brw->stencil_write_enabled;

      if (brw->stencil_two_sided) {
         ds->DoubleSidedStencilEnable = true;
         ds->BackfaceStencilWriteMask = stencil->WriteMask[b] & 0xff;
         ds->BackfaceStencilTestMask = stencil->ValueMask[b] & 0xff;

         ds->BackfaceStencilTestFunction =
            intel_translate_compare_func(stencil->Function[b]);
         ds->BackfaceStencilFailOp =
            intel_translate_stencil_op(stencil->FailFunc[b]);
         ds->BackfaceStencilPassDepthPassOp =
            intel_translate_stencil_op(stencil->ZPassFunc[b]);
         ds->BackfaceStencilPassDepthFailOp =
            intel_translate_stencil_op(stencil->ZFailFunc[b]);
      }

#if GEN_GEN <= 5 || GEN_GEN >= 9
      ds->StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      ds->BackfaceStencilReferenceValue = _mesa_get_stencil_ref(ctx, b);
#endif
   }
}
static void
genX(upload_depth_stencil_state)(struct brw_context *brw)
{
#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_WM_DEPTH_STENCIL), wmds) {
      set_depth_stencil_bits(brw, &wmds);
   }
#else
   uint32_t ds_offset;
   brw_state_emit(brw, GENX(DEPTH_STENCIL_STATE), 64, &ds_offset, ds) {
      set_depth_stencil_bits(brw, &ds);
   }

   /* Now upload a pointer to the indirect state */
#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
      ptr.DEPTH_STENCIL_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), ptr) {
      ptr.PointertoDEPTH_STENCIL_STATE = ds_offset;
   }
#endif
#endif
}
static const struct brw_tracked_state genX(depth_stencil_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_DEPTH |
              _NEW_STENCIL,
      .brw = BRW_NEW_BLORP |
             (GEN_GEN >= 8 ? BRW_NEW_CONTEXT
                           : BRW_NEW_BATCH |
                             BRW_NEW_STATE_BASE_ADDRESS),
   },
   .emit = genX(upload_depth_stencil_state),
};
/* ----------------------------------------------------------------------- */

#if GEN_GEN <= 5
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(CLIP_STATE), 32, &brw->clip.state_offset, clip) {
      clip.KernelStartPointer = KSP(brw, brw->clip.prog_offset);
      clip.GRFRegisterCount =
         DIV_ROUND_UP(brw->clip.prog_data->total_grf, 16) - 1;
      clip.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
      clip.SingleProgramFlow = true;
      clip.VertexURBEntryReadLength = brw->clip.prog_data->urb_read_length;
      clip.ConstantURBEntryReadLength = brw->clip.prog_data->curb_read_length;

      /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
      clip.ConstantURBEntryReadOffset = brw->curbe.clip_start * 2;
      clip.DispatchGRFStartRegisterForURBData = 1;
      clip.VertexURBEntryReadOffset = 0;

      /* BRW_NEW_URB_FENCE */
      clip.NumberofURBEntries = brw->urb.nr_clip_entries;
      clip.URBEntryAllocationSize = brw->urb.vsize - 1;

      if (brw->urb.nr_clip_entries >= 10) {
         /* Half of the URB entries go to each thread, and it has to be an
          * even number.
          */
         assert(brw->urb.nr_clip_entries % 2 == 0);

         /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
          * only 2 threads can output VUEs at a time.
          */
         clip.MaximumNumberofThreads = (GEN_GEN == 5 ? 16 : 2) - 1;
      } else {
         assert(brw->urb.nr_clip_entries >= 5);
         clip.MaximumNumberofThreads = 1 - 1;
      }

      clip.VertexPositionSpace = VPOS_NDCSPACE;
      clip.UserClipFlagsMustClipEnable = true;
      clip.GuardbandClipTestEnable = true;

      clip.ClipperViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->clip.vp_offset);

      clip.ScreenSpaceViewportXMin = -1;
      clip.ScreenSpaceViewportXMax = 1;
      clip.ScreenSpaceViewportYMin = -1;
      clip.ScreenSpaceViewportYMax = 1;

      clip.ViewportXYClipTestEnable = true;
      clip.ViewportZClipTestEnable = !(ctx->Transform.DepthClampNear &&
                                       ctx->Transform.DepthClampFar);

      /* _NEW_TRANSFORM */
      if (GEN_GEN == 5 || GEN_IS_G4X) {
         clip.UserClipDistanceClipTestEnableBitmask =
            ctx->Transform.ClipPlanesEnabled;
      } else {
         /* Up to 6 actual clip flags, plus the 7th for the negative RHW
          * workaround.
          */
         clip.UserClipDistanceClipTestEnableBitmask =
            (ctx->Transform.ClipPlanesEnabled & 0x3f) | 0x40;
      }

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      clip.ClipMode = brw->clip.prog_data->clip_mode;

#if GEN_IS_G4X
      clip.NegativeWClipTestEnable = true;
#endif
   }
}

const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa  = _NEW_TRANSFORM |
               _NEW_VIEWPORT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CLIP_PROG_DATA |
               BRW_NEW_PUSH_CONSTANT_ALLOCATION |
               BRW_NEW_PROGRAM_CACHE |
               BRW_NEW_URB_FENCE,
   },
   .emit = genX(upload_clip_state),
};
#else
static void
genX(upload_clip_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_CLIP), clip) {
      clip.StatisticsEnable = !brw->meta_in_progress;

      if (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
         clip.NonPerspectiveBarycentricEnable = true;

#if GEN_GEN >= 7
      clip.EarlyCullEnable = true;
#endif

#if GEN_GEN == 7
      clip.FrontWinding = brw->polygon_front_bit != fb->FlipY;

      if (ctx->Polygon.CullFlag) {
         switch (ctx->Polygon.CullFaceMode) {
         case GL_FRONT:
            clip.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            clip.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            clip.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("Should not get here: invalid CullFlag");
         }
      } else {
         clip.CullMode = CULLMODE_NONE;
      }
#endif

#if GEN_GEN < 8
      clip.UserClipDistanceCullTestEnableBitmask =
         brw_vue_prog_data(brw->vs.base.prog_data)->cull_distance_mask;

      clip.ViewportZClipTestEnable = !(ctx->Transform.DepthClampNear &&
                                       ctx->Transform.DepthClampFar);
#endif

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex == GL_FIRST_VERTEX_CONVENTION) {
         clip.TriangleStripListProvokingVertexSelect = 0;
         clip.TriangleFanProvokingVertexSelect = 1;
         clip.LineStripListProvokingVertexSelect = 0;
      } else {
         clip.TriangleStripListProvokingVertexSelect = 2;
         clip.TriangleFanProvokingVertexSelect = 2;
         clip.LineStripListProvokingVertexSelect = 1;
      }

      /* _NEW_TRANSFORM */
      clip.UserClipDistanceClipTestEnableBitmask =
         ctx->Transform.ClipPlanesEnabled;

#if GEN_GEN >= 8
      clip.ForceUserClipDistanceClipTestEnableBitmask = true;
#endif

      if (ctx->Transform.ClipDepthMode == GL_ZERO_TO_ONE)
         clip.APIMode = APIMODE_D3D;
      else
         clip.APIMode = APIMODE_OGL;

      clip.GuardbandClipTestEnable = true;

      /* BRW_NEW_VIEWPORT_COUNT */
      const unsigned viewport_count = brw->clip.viewport_count;

      if (ctx->RasterDiscard) {
         clip.ClipMode = CLIPMODE_REJECT_ALL;
#if GEN_GEN == 6
         perf_debug("Rasterizer discard is currently implemented via the "
                    "clipper; having the GS not write primitives would "
                    "likely be faster.\n");
#endif
      } else {
         clip.ClipMode = CLIPMODE_NORMAL;
      }

      clip.ClipEnable = true;

      /* _NEW_POLYGON,
       * BRW_NEW_GEOMETRY_PROGRAM | BRW_NEW_TES_PROG_DATA | BRW_NEW_PRIMITIVE
       */
      if (!brw_is_drawing_points(brw) && !brw_is_drawing_lines(brw))
         clip.ViewportXYClipTestEnable = true;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;
      clip.MaximumVPIndex = viewport_count - 1;
      if (_mesa_geometric_layers(fb) == 0)
         clip.ForceZeroRTAIndexEnable = true;
   }
}

static const struct brw_tracked_state genX(clip_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_POLYGON |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_GS_PROG_DATA |
               BRW_NEW_VS_PROG_DATA |
               BRW_NEW_META_IN_PROGRESS |
               BRW_NEW_PRIMITIVE |
               BRW_NEW_RASTERIZER_DISCARD |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_clip_state),
};
#endif
1512 genX(upload_sf
)(struct brw_context
*brw
)
1514 struct gl_context
*ctx
= &brw
->ctx
;
1519 bool flip_y
= ctx
->DrawBuffer
->FlipY
;
1520 UNUSED
const bool multisampled_fbo
=
1521 _mesa_geometric_samples(ctx
->DrawBuffer
) > 1;
1525 const struct brw_sf_prog_data
*sf_prog_data
= brw
->sf
.prog_data
;
1527 ctx
->NewDriverState
|= BRW_NEW_GEN4_UNIT_STATE
;
1529 brw_state_emit(brw
, GENX(SF_STATE
), 64, &brw
->sf
.state_offset
, sf
) {
1530 sf
.KernelStartPointer
= KSP(brw
, brw
->sf
.prog_offset
);
1531 sf
.FloatingPointMode
= FLOATING_POINT_MODE_Alternate
;
1532 sf
.GRFRegisterCount
= DIV_ROUND_UP(sf_prog_data
->total_grf
, 16) - 1;
1533 sf
.DispatchGRFStartRegisterForURBData
= 3;
1534 sf
.VertexURBEntryReadOffset
= BRW_SF_URB_ENTRY_READ_OFFSET
;
1535 sf
.VertexURBEntryReadLength
= sf_prog_data
->urb_read_length
;
1536 sf
.NumberofURBEntries
= brw
->urb
.nr_sf_entries
;
1537 sf
.URBEntryAllocationSize
= brw
->urb
.sfsize
- 1;
1539 /* STATE_PREFETCH command description describes this state as being
1540 * something loaded through the GPE (L2 ISC), so it's INSTRUCTION
1543 sf
.SetupViewportStateOffset
=
1544 ro_bo(brw
->batch
.state
.bo
, brw
->sf
.vp_offset
);
1546 sf
.PointRasterizationRule
= RASTRULE_UPPER_RIGHT
;
1548 /* sf.ConstantURBEntryReadLength = stage_prog_data->curb_read_length; */
1549 /* sf.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2; */
1551 sf
.MaximumNumberofThreads
=
1552 MIN2(GEN_GEN
== 5 ? 48 : 24, brw
->urb
.nr_sf_entries
) - 1;
1554 sf
.SpritePointEnable
= ctx
->Point
.PointSprite
;
1556 sf
.DestinationOriginHorizontalBias
= 0.5;
1557 sf
.DestinationOriginVerticalBias
= 0.5;
1559 brw_batch_emit(brw
, GENX(3DSTATE_SF
), sf
) {
1560 sf
.StatisticsEnable
= true;
1562 sf
.ViewportTransformEnable
= true;
1566 sf
.DepthBufferSurfaceFormat
= brw_depthbuffer_format(brw
);
1571 sf
.FrontWinding
= brw
->polygon_front_bit
!= flip_y
;
1573 sf
.GlobalDepthOffsetEnableSolid
= ctx
->Polygon
.OffsetFill
;
1574 sf
.GlobalDepthOffsetEnableWireframe
= ctx
->Polygon
.OffsetLine
;
1575 sf
.GlobalDepthOffsetEnablePoint
= ctx
->Polygon
.OffsetPoint
;
1577 switch (ctx
->Polygon
.FrontMode
) {
1579 sf
.FrontFaceFillMode
= FILL_MODE_SOLID
;
1582 sf
.FrontFaceFillMode
= FILL_MODE_WIREFRAME
;
1585 sf
.FrontFaceFillMode
= FILL_MODE_POINT
;
1588 unreachable("not reached");
1591 switch (ctx
->Polygon
.BackMode
) {
1593 sf
.BackFaceFillMode
= FILL_MODE_SOLID
;
1596 sf
.BackFaceFillMode
= FILL_MODE_WIREFRAME
;
1599 sf
.BackFaceFillMode
= FILL_MODE_POINT
;
1602 unreachable("not reached");
1605 if (multisampled_fbo
&& ctx
->Multisample
.Enabled
)
1606 sf
.MultisampleRasterizationMode
= MSRASTMODE_ON_PATTERN
;
1608 sf
.GlobalDepthOffsetConstant
= ctx
->Polygon
.OffsetUnits
* 2;
1609 sf
.GlobalDepthOffsetScale
= ctx
->Polygon
.OffsetFactor
;
1610 sf
.GlobalDepthOffsetClamp
= ctx
->Polygon
.OffsetClamp
;
1613 sf
.ScissorRectangleEnable
= true;
1615 if (ctx
->Polygon
.CullFlag
) {
1616 switch (ctx
->Polygon
.CullFaceMode
) {
1618 sf
.CullMode
= CULLMODE_FRONT
;
1621 sf
.CullMode
= CULLMODE_BACK
;
1623 case GL_FRONT_AND_BACK
:
1624 sf
.CullMode
= CULLMODE_BOTH
;
1627 unreachable("not reached");
1630 sf
.CullMode
= CULLMODE_NONE
;
1634 sf
.LineStippleEnable
= ctx
->Line
.StippleFlag
;
1641 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1643 if (devinfo
->is_cherryview
)
1644 sf
.CHVLineWidth
= brw_get_line_width(brw
);
1646 sf
.LineWidth
= brw_get_line_width(brw
);
1648 sf
.LineWidth
= brw_get_line_width(brw
);
1651 if (ctx
->Line
.SmoothFlag
) {
1652 sf
.LineEndCapAntialiasingRegionWidth
= _10pixels
;
1654 sf
.AntialiasingEnable
= true;
1658 /* _NEW_POINT - Clamp to ARB_point_parameters user limits */
1659 point_size
= CLAMP(ctx
->Point
.Size
, ctx
->Point
.MinSize
, ctx
->Point
.MaxSize
);
1660 /* Clamp to the hardware limits */
1661 sf
.PointWidth
= CLAMP(point_size
, 0.125f
, 255.875f
);
1663 /* _NEW_PROGRAM | _NEW_POINT, BRW_NEW_VUE_MAP_GEOM_OUT */
1664 if (use_state_point_size(brw
))
1665 sf
.PointWidthSource
= State
;
1668 /* _NEW_POINT | _NEW_MULTISAMPLE */
1669 if ((ctx
->Point
.SmoothFlag
|| _mesa_is_multisample_enabled(ctx
)) &&
1670 !ctx
->Point
.PointSprite
)
1671 sf
.SmoothPointEnable
= true;
1676 * Smooth Point Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
1678 const bool multisampled_fbo
=
1679 _mesa_geometric_samples(ctx
->DrawBuffer
) > 1;
1680 if (multisampled_fbo
)
1681 sf
.SmoothPointEnable
= false;
1684 #if GEN_IS_G4X || GEN_GEN >= 5
1685 sf
.AALineDistanceMode
= AALINEDISTANCE_TRUE
;
1689 if (ctx
->Light
.ProvokingVertex
!= GL_FIRST_VERTEX_CONVENTION
) {
1690 sf
.TriangleStripListProvokingVertexSelect
= 2;
1691 sf
.TriangleFanProvokingVertexSelect
= 2;
1692 sf
.LineStripListProvokingVertexSelect
= 1;
1694 sf
.TriangleFanProvokingVertexSelect
= 1;
1698 /* BRW_NEW_FS_PROG_DATA */
1699 const struct brw_wm_prog_data
*wm_prog_data
=
1700 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1702 sf
.AttributeSwizzleEnable
= true;
1703 sf
.NumberofSFOutputAttributes
= wm_prog_data
->num_varying_inputs
;
1706 * Window coordinates in an FBO are inverted, which means point
1707 * sprite origin must be inverted, too.
1709 if ((ctx
->Point
.SpriteOrigin
== GL_LOWER_LEFT
) == flip_y
) {
1710 sf
.PointSpriteTextureCoordinateOrigin
= LOWERLEFT
;
1712 sf
.PointSpriteTextureCoordinateOrigin
= UPPERLEFT
;
1715 /* BRW_NEW_VUE_MAP_GEOM_OUT | BRW_NEW_FRAGMENT_PROGRAM |
1716 * _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM | BRW_NEW_FS_PROG_DATA
1718 uint32_t urb_entry_read_length
;
1719 uint32_t urb_entry_read_offset
;
1720 uint32_t point_sprite_enables
;
1721 genX(calculate_attr_overrides
)(brw
, sf
.Attribute
, &point_sprite_enables
,
1722 &urb_entry_read_length
,
1723 &urb_entry_read_offset
);
1724 sf
.VertexURBEntryReadLength
= urb_entry_read_length
;
1725 sf
.VertexURBEntryReadOffset
= urb_entry_read_offset
;
1726 sf
.PointSpriteTextureCoordinateEnable
= point_sprite_enables
;
1727 sf
.ConstantInterpolationEnable
= wm_prog_data
->flat_inputs
;
1732 static const struct brw_tracked_state
genX(sf_state
) = {
1734 .mesa
= _NEW_LIGHT
|
1738 (GEN_GEN
>= 6 ? _NEW_MULTISAMPLE
: 0) |
1739 (GEN_GEN
<= 7 ? _NEW_BUFFERS
| _NEW_POLYGON
: 0) |
1740 (GEN_GEN
== 10 ? _NEW_BUFFERS
: 0),
1741 .brw
= BRW_NEW_BLORP
|
1742 BRW_NEW_VUE_MAP_GEOM_OUT
|
1743 (GEN_GEN
<= 5 ? BRW_NEW_BATCH
|
1744 BRW_NEW_PROGRAM_CACHE
|
1745 BRW_NEW_SF_PROG_DATA
|
1749 (GEN_GEN
>= 6 ? BRW_NEW_CONTEXT
: 0) |
1750 (GEN_GEN
>= 6 && GEN_GEN
<= 7 ?
1751 BRW_NEW_GS_PROG_DATA
|
1753 BRW_NEW_TES_PROG_DATA
1755 (GEN_GEN
== 6 ? BRW_NEW_FS_PROG_DATA
|
1756 BRW_NEW_FRAGMENT_PROGRAM
1759 .emit
= genX(upload_sf
),
/* ----------------------------------------------------------------------- */

static bool
brw_color_buffer_write_enabled(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   unsigned i;

   /* _NEW_BUFFERS */
   for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
      uint64_t outputs_written = fp->info.outputs_written;

      /* _NEW_COLOR */
      if (rb && (outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR) ||
                 outputs_written & BITFIELD64_BIT(FRAG_RESULT_DATA0 + i)) &&
          GET_COLORMASK(ctx->Color.ColorMask, i)) {
         return true;
      }
   }

   return false;
}
1789 genX(upload_wm
)(struct brw_context
*brw
)
1791 struct gl_context
*ctx
= &brw
->ctx
;
1793 /* BRW_NEW_FS_PROG_DATA */
1794 const struct brw_wm_prog_data
*wm_prog_data
=
1795 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1797 UNUSED
bool writes_depth
=
1798 wm_prog_data
->computed_depth_mode
!= BRW_PSCDEPTH_OFF
;
1799 UNUSED
struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
1800 UNUSED
const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1803 /* We can't fold this into gen6_upload_wm_push_constants(), because
1804 * according to the SNB PRM, vol 2 part 1 section 7.2.2
1805 * (3DSTATE_CONSTANT_PS [DevSNB]):
1807 * "[DevSNB]: This packet must be followed by WM_STATE."
1809 brw_batch_emit(brw
, GENX(3DSTATE_CONSTANT_PS
), wmcp
) {
1810 if (wm_prog_data
->base
.nr_params
!= 0) {
1811 wmcp
.Buffer0Valid
= true;
1812 /* Pointer to the WM constant buffer. Covered by the set of
1813 * state flags from gen6_upload_wm_push_constants.
1815 wmcp
.ConstantBody
.PointertoConstantBuffer0
= stage_state
->push_const_offset
;
1816 wmcp
.ConstantBody
.ConstantBuffer0ReadLength
= stage_state
->push_const_size
- 1;
1822 brw_batch_emit(brw
, GENX(3DSTATE_WM
), wm
) {
1824 ctx
->NewDriverState
|= BRW_NEW_GEN4_UNIT_STATE
;
1825 brw_state_emit(brw
, GENX(WM_STATE
), 64, &stage_state
->state_offset
, wm
) {
1829 wm
._8PixelDispatchEnable
= wm_prog_data
->dispatch_8
;
1830 wm
._16PixelDispatchEnable
= wm_prog_data
->dispatch_16
;
1831 wm
._32PixelDispatchEnable
= wm_prog_data
->dispatch_32
;
1835 /* On gen4, we only have one shader kernel */
1836 if (brw_wm_state_has_ksp(wm
, 0)) {
1837 assert(brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 0) == 0);
1838 wm
.KernelStartPointer0
= KSP(brw
, stage_state
->prog_offset
);
1839 wm
.GRFRegisterCount0
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 0);
1840 wm
.DispatchGRFStartRegisterForConstantSetupData0
=
1841 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data
, wm
, 0);
1844 /* On gen5, we have multiple shader kernels but only one GRF start
1845 * register for all kernels
1847 wm
.KernelStartPointer0
= stage_state
->prog_offset
+
1848 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 0);
1849 wm
.KernelStartPointer1
= stage_state
->prog_offset
+
1850 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 1);
1851 wm
.KernelStartPointer2
= stage_state
->prog_offset
+
1852 brw_wm_prog_data_prog_offset(wm_prog_data
, wm
, 2);
1854 wm
.GRFRegisterCount0
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 0);
1855 wm
.GRFRegisterCount1
= brw_wm_prog_data_reg_blocks(wm_prog_data
, wm
, 1);
1856 wm
      wm.GRFRegisterCount2 = brw_wm_prog_data_reg_blocks(wm_prog_data, wm, 2);

      wm.DispatchGRFStartRegisterForConstantSetupData0 =
         wm_prog_data->base.dispatch_grf_start_reg;

      /* Dispatch GRF Start should be the same for all shaders on gen5 */
      if (brw_wm_state_has_ksp(wm, 1)) {
         assert(wm_prog_data->base.dispatch_grf_start_reg ==
                brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, wm, 1));
      }
      if (brw_wm_state_has_ksp(wm, 2)) {
         assert(wm_prog_data->base.dispatch_grf_start_reg ==
                brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, wm, 2));
      }
#elif GEN_GEN == 6
      /* On gen6, we have multiple shader kernels and we no longer specify a
       * register count for each one.
       */
      wm.KernelStartPointer0 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(wm_prog_data, wm, 0);
      wm.KernelStartPointer1 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(wm_prog_data, wm, 1);
      wm.KernelStartPointer2 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(wm_prog_data, wm, 2);

      wm.DispatchGRFStartRegisterForConstantSetupData0 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, wm, 0);
      wm.DispatchGRFStartRegisterForConstantSetupData1 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, wm, 1);
      wm.DispatchGRFStartRegisterForConstantSetupData2 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, wm, 2);
#endif

#if GEN_GEN <= 5
      wm.ConstantURBEntryReadLength = wm_prog_data->base.curb_read_length;
      /* BRW_NEW_PUSH_CONSTANT_ALLOCATION */
      wm.ConstantURBEntryReadOffset = brw->curbe.wm_start * 2;
      wm.SetupURBEntryReadLength = wm_prog_data->num_varying_inputs * 2;
      wm.SetupURBEntryReadOffset = 0;
      wm.EarlyDepthTestEnable = true;
#endif

#if GEN_GEN >= 6
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;

      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.BarycentricInterpolationMode = wm_prog_data->barycentric_interp_modes;
#else
      if (stage_state->sampler_count)
         wm.SamplerStatePointer =
            ro_bo(brw->batch.state.bo, stage_state->sampler_offset);

      wm.LineAntialiasingRegionWidth = _05pixels;
      wm.LineEndCapAntialiasingRegionWidth = _10pixels;

      /* _NEW_POLYGON */
      if (ctx->Polygon.OffsetFill) {
         wm.GlobalDepthOffsetEnable = true;
         /* Something weird going on with legacy_global_depth_bias,
          * offset_constant, scaling and MRD.  This value passes glean
          * but gives some odd results elsewere (eg. the
          * quad-offset-units test).
          */
         wm.GlobalDepthOffsetConstant = ctx->Polygon.OffsetUnits * 2;

         /* This is the only value that passes glean:
          */
         wm.GlobalDepthOffsetScale = ctx->Polygon.OffsetFactor;
      }

      wm.DepthCoefficientURBReadOffset = 1;
#endif

      /* BRW_NEW_STATS_WM */
      wm.StatisticsEnable = GEN_GEN >= 6 || brw->stats_wm;

#if GEN_GEN < 7
      if (wm_prog_data->base.use_alt_mode)
         wm.FloatingPointMode = FLOATING_POINT_MODE_Alternate;

      wm.SamplerCount = GEN_GEN == 5 ?
         0 : DIV_ROUND_UP(stage_state->sampler_count, 4);

      wm.BindingTableEntryCount =
         wm_prog_data->base.binding_table.size_bytes / 4;
      wm.MaximumNumberofThreads = devinfo->max_wm_threads - 1;

#if GEN_GEN == 6
      wm.DualSourceBlendEnable =
         wm_prog_data->dual_src_blend && (ctx->Color.BlendEnabled & 1) &&
         ctx->Color.Blend[0]._UsesDualSrc;
      wm.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      wm.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* From the SNB PRM, volume 2 part 1, page 281:
       *  "If the PS kernel does not need the Position XY Offsets
       *   to compute a Position XY value, then this field should be
       *   programmed to POSOFFSET_NONE."
       *
       *  "SW Recommendation: If the PS kernel needs the Position Offsets
       *   to compute a Position XY value, this field should match Position
       *   ZW Interpolation Mode to ensure a consistent position.xyzw
       *   computation."
       *
       * We only require XY sample offsets. So, this recommendation doesn't
       * look useful at the moment. We might need this in future.
       */
      if (wm_prog_data->uses_pos_offset)
         wm.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         wm.PositionXYOffsetSelect = POSOFFSET_NONE;
#endif

      if (wm_prog_data->base.total_scratch) {
         wm.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         wm.PerThreadScratchSpace =
            ffs(stage_state->per_thread_scratch) - 11;
      }
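      /* Worked example (added for illustration, not from the original
       * source): per_thread_scratch is a power-of-two byte count and the
       * field above encodes a power-of-two size in KB, so e.g.
       * per_thread_scratch == 2048 yields ffs(2048) - 11 = 12 - 11 = 1,
       * i.e. 2 KB of scratch space per thread.
       */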
      wm.PixelShaderComputedDepth = writes_depth;
#endif

      /* _NEW_LINE */
      wm.LineStippleEnable = ctx->Line.StippleFlag;

      /* _NEW_POLYGON */
      wm.PolygonStippleEnable = ctx->Polygon.StippleFlag;

#if GEN_GEN < 8

#if GEN_GEN >= 6
      wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;

      /* _NEW_BUFFERS */
      const bool multisampled_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;

      if (multisampled_fbo) {
         /* _NEW_MULTISAMPLE */
         if (ctx->Multisample.Enabled)
            wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
         else
            wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;

         if (wm_prog_data->persample_dispatch)
            wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
         else
            wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
      } else {
         wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
         wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
      }
#endif

      wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      if (wm_prog_data->uses_kill ||
          _mesa_is_alpha_test_enabled(ctx) ||
          _mesa_is_alpha_to_coverage_enabled(ctx) ||
          (GEN_GEN >= 6 && wm_prog_data->uses_omask)) {
         wm.PixelShaderKillsPixel = true;
      }

      /* _NEW_BUFFERS | _NEW_COLOR */
      if (brw_color_buffer_write_enabled(brw) || writes_depth ||
          wm.PixelShaderKillsPixel ||
          (GEN_GEN >= 6 && wm_prog_data->has_side_effects)) {
         wm.ThreadDispatchEnable = true;
      }

#if GEN_GEN >= 7
      wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
#endif

      /* The "UAV access enable" bits are unnecessary on HSW because they only
       * seem to have an effect on the HW-assisted coherency mechanism which we
       * don't need, and the rasterization-related UAV_ONLY flag and the
       * DISPATCH_ENABLE bit can be set independently from it.
       * C.f. gen8_upload_ps_extra().
       *
       * BRW_NEW_FRAGMENT_PROGRAM | BRW_NEW_FS_PROG_DATA | _NEW_BUFFERS |
       * _NEW_COLOR
       */
#if GEN_IS_HASWELL
      if (!(brw_color_buffer_write_enabled(brw) || writes_depth) &&
          wm_prog_data->has_side_effects)
         wm.PSUAVonly = ON;
#endif
#endif /* GEN_GEN < 8 */

#if GEN_GEN >= 7
      /* BRW_NEW_FS_PROG_DATA */
      if (wm_prog_data->early_fragment_tests)
         wm.EarlyDepthStencilControl = EDSC_PREPS;
      else if (wm_prog_data->has_side_effects)
         wm.EarlyDepthStencilControl = EDSC_PSEXEC;
#endif
   }

#if GEN_GEN <= 5
   if (brw->wm.offset_clamp != ctx->Polygon.OffsetClamp) {
      brw_batch_emit(brw, GENX(3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP), clamp) {
         clamp.GlobalDepthOffsetClamp = ctx->Polygon.OffsetClamp;
      }

      brw->wm.offset_clamp = ctx->Polygon.OffsetClamp;
   }
#endif
}
static const struct brw_tracked_state genX(wm_state) = {
   .dirty = {
      .mesa  = _NEW_LINE |
               _NEW_POLYGON |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR
                            : 0) |
               (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0) |
               (GEN_GEN < 6 ? _NEW_POLYGONSTIPPLE : 0) |
               (GEN_GEN < 8 && GEN_GEN >= 6 ? _NEW_MULTISAMPLE : 0),
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA |
               (GEN_GEN < 6 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                              BRW_NEW_FRAGMENT_PROGRAM |
                              BRW_NEW_PROGRAM_CACHE |
                              BRW_NEW_SAMPLER_STATE_TABLE |
                              BRW_NEW_STATS_WM
                            : 0) |
               (GEN_GEN < 7 ? BRW_NEW_BATCH : BRW_NEW_CONTEXT),
   },
   .emit = genX(upload_wm),
};
/* ---------------------------------------------------------------------- */

/* We restrict scratch buffers to the bottom 32 bits of the address space
 * by using rw_32_bo().
 *
 * General State Base Address is a bit broken.  If the address + size as
 * seen by STATE_BASE_ADDRESS overflows 48 bits, the GPU appears to treat
 * all accesses to the buffer as being out of bounds and returns zero.
 */

#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
   pkt.KernelStartPointer = KSP(brw, stage_state->prog_offset);           \
   /* WA_1606682166 */                                                    \
   pkt.SamplerCount =                                                     \
      GEN_GEN == 11 ?                                                     \
      0 :                                                                 \
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
   /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests to     \
    * disable prefetching of binding tables in A0 and B0 steppings.       \
    * TODO: Revisit this WA on C0 stepping.                               \
    */                                                                    \
   pkt.BindingTableEntryCount =                                           \
      GEN_GEN == 11 ?                                                     \
      0 :                                                                 \
      stage_prog_data->binding_table.size_bytes / 4;                      \
   pkt.FloatingPointMode = stage_prog_data->use_alt_mode;                 \
                                                                          \
   if (stage_prog_data->total_scratch) {                                  \
      pkt.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0); \
      pkt.PerThreadScratchSpace =                                         \
         ffs(stage_state->per_thread_scratch) - 11;                       \
   }                                                                      \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      stage_prog_data->dispatch_grf_start_reg;                            \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;                                           \
   pkt.Enable = true;
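/* Usage note (added for clarity, not in the original source): the ##
 * token pasting above means INIT_THREAD_DISPATCH_FIELDS(vs, Vertex)
 * assigns vs.VertexURBEntryReadLength and vs.VertexURBEntryReadOffset,
 * while INIT_THREAD_DISPATCH_FIELDS(gs, Vertex) targets the equivalent
 * fields of the gs packet.
 */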
static void
genX(upload_vs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(brw->vs.base.prog_data);
   const struct brw_stage_prog_data *stage_prog_data = &vue_prog_data->base;

   assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT);
   assert(GEN_GEN < 11 ||
          vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);

#if GEN_GEN == 6
   /* From the BSpec, 3D Pipeline > Geometry > Vertex Shader > State,
    * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
    *
    *   [DevSNB] A pipeline flush must be programmed prior to a 3DSTATE_VS
    *   command that causes the VS Function Enable to toggle. Pipeline
    *   flush can be executed by sending a PIPE_CONTROL command with CS
    *   stall bit set and a post sync operation.
    *
    * We've already done such a flush at the start of state upload, so we
    * don't need to do another one here.
    */
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), cvs) {
      if (stage_state->push_const_size != 0) {
         cvs.Buffer0Valid = true;
         cvs.ConstantBody.PointertoConstantBuffer0 = stage_state->push_const_offset;
         cvs.ConstantBody.ConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

   if (GEN_GEN == 7 && devinfo->is_ivybridge)
      gen7_emit_vs_workaround_flush(brw);

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_VS), vs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(VS_STATE), 32, &stage_state->state_offset, vs) {
#endif
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);

      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;

#if GEN_GEN < 6
      vs.GRFRegisterCount = DIV_ROUND_UP(vue_prog_data->total_grf, 16) - 1;
      vs.ConstantURBEntryReadLength = stage_prog_data->curb_read_length;
      vs.ConstantURBEntryReadOffset = brw->curbe.vs_start * 2;

      vs.NumberofURBEntries = brw->urb.nr_vs_entries >> (GEN_GEN == 5 ? 2 : 0);
      vs.URBEntryAllocationSize = brw->urb.vsize - 1;

      vs.MaximumNumberofThreads =
         CLAMP(brw->urb.nr_vs_entries / 2, 1, devinfo->max_vs_threads) - 1;

      vs.StatisticsEnable = false;
      vs.SamplerStatePointer =
         ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#endif

#if GEN_GEN == 5
      /* Force single program flow on Ironlake.  We cannot reliably get
       * all applications working without it.  See:
       * https://bugs.freedesktop.org/show_bug.cgi?id=29172
       *
       * The most notable and reliably failing application is the Humus
       * demo "CelShading"
       */
      vs.SingleProgramFlow = true;
      vs.SamplerCount = 0; /* hardware requirement */
#endif

#if GEN_GEN >= 8
      vs.SIMD8DispatchEnable =
         vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8;

      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
#endif
   }

#if GEN_GEN == 6
   /* Based on my reading of the simulator, the VS constants don't get
    * pulled into the VS FF unit until an appropriate pipeline flush
    * happens, and instead the 3DSTATE_CONSTANT_VS packet just adds
    * references to them into a little FIFO.  The flushes are common,
    * but don't reliably happen between this and a 3DPRIMITIVE, causing
    * the primitive to use the wrong constants.  Then the FIFO
    * containing the constant setup gets added to again on the next
    * constants change, and eventually when a flush does happen the
    * unit is overwhelmed by constant changes and dies.
    *
    * To avoid this, send a PIPE_CONTROL down the line that will
    * update the unit immediately loading the constants.  The flush
    * type bits here were those set by the STATE_BASE_ADDRESS whose
    * move in a82a43e8d99e1715dd11c9c091b5ab734079b6a6 triggered the
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
#endif
}
static const struct brw_tracked_state genX(vs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? (_NEW_PROGRAM_CONSTANTS | _NEW_TRANSFORM) : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_VS_PROG_DATA |
               (GEN_GEN == 6 ? BRW_NEW_VERTEX_PROGRAM : 0) |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_SAMPLER_STATE_TABLE |
                               BRW_NEW_URB_FENCE
                             : 0),
   },
   .emit = genX(upload_vs_state),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_cc_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   struct GENX(CC_VIEWPORT) ccv;
   uint32_t cc_vp_offset;
   uint32_t *cc_map =
      brw_state_batch(brw, 4 * GENX(CC_VIEWPORT_length) * viewport_count,
                      32, &cc_vp_offset);

   for (unsigned i = 0; i < viewport_count; i++) {
      /* _NEW_VIEWPORT | _NEW_TRANSFORM */
      const struct gl_viewport_attrib *vp = &ctx->ViewportArray[i];
      if (ctx->Transform.DepthClampNear && ctx->Transform.DepthClampFar) {
         ccv.MinimumDepth = MIN2(vp->Near, vp->Far);
         ccv.MaximumDepth = MAX2(vp->Near, vp->Far);
      } else if (ctx->Transform.DepthClampNear) {
         ccv.MinimumDepth = MIN2(vp->Near, vp->Far);
         ccv.MaximumDepth = 0.0;
      } else if (ctx->Transform.DepthClampFar) {
         ccv.MinimumDepth = 0.0;
         ccv.MaximumDepth = MAX2(vp->Near, vp->Far);
      } else {
         ccv.MinimumDepth = 0.0;
         ccv.MaximumDepth = 1.0;
      }
      GENX(CC_VIEWPORT_pack)(NULL, cc_map, &ccv);
      cc_map += GENX(CC_VIEWPORT_length);
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
      ptr.CCViewportPointer = cc_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.CCViewportStateChange = 1;
      vp.PointertoCC_VIEWPORT = cc_vp_offset;
   }
#else
   brw->cc.vp_offset = cc_vp_offset;
   ctx->NewDriverState |= BRW_NEW_CC_VP;
#endif
}
const struct brw_tracked_state genX(cc_vp) = {
   .dirty = {
      .mesa = _NEW_TRANSFORM |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_cc_viewport)
};
/* ---------------------------------------------------------------------- */

static void
set_scissor_bits(const struct gl_context *ctx, int i,
                 bool flip_y, unsigned fb_width, unsigned fb_height,
                 struct GENX(SCISSOR_RECT) *sc)
{
   int bbox[4];

   bbox[0] = MAX2(ctx->ViewportArray[i].X, 0);
   bbox[1] = MIN2(bbox[0] + ctx->ViewportArray[i].Width, fb_width);
   bbox[2] = CLAMP(ctx->ViewportArray[i].Y, 0, fb_height);
   bbox[3] = MIN2(bbox[2] + ctx->ViewportArray[i].Height, fb_height);
   _mesa_intersect_scissor_bounding_box(ctx, i, bbox);

   if (bbox[0] == bbox[1] || bbox[2] == bbox[3]) {
      /* If the scissor was out of bounds and got clamped to 0 width/height
       * at the bounds, the subtraction of 1 from maximums could produce a
       * negative number and thus not clip anything.  Instead, just provide
       * a min > max scissor inside the bounds, which produces the expected
       * no rendering.
       */
      sc->ScissorRectangleXMin = 1;
      sc->ScissorRectangleXMax = 0;
      sc->ScissorRectangleYMin = 1;
      sc->ScissorRectangleYMax = 0;
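      /* Example (added for illustration, not from the original source):
       * because the hardware treats these bounds as inclusive, XMin=1 /
       * XMax=0 describes a rectangle with max < min, so every pixel fails
       * the scissor test and nothing is rendered, which is exactly what a
       * fully clipped scissor requires.
       */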
   } else if (!flip_y) {
      /* texmemory: Y=0=bottom */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = bbox[2];
      sc->ScissorRectangleYMax = bbox[3] - 1;
   } else {
      /* memory: Y=0=top */
      sc->ScissorRectangleXMin = bbox[0];
      sc->ScissorRectangleXMax = bbox[1] - 1;
      sc->ScissorRectangleYMin = fb_height - bbox[3];
      sc->ScissorRectangleYMax = fb_height - bbox[2] - 1;
   }
}
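/* Worked example for the flipped case above (added for illustration, not
 * from the original source): with fb_height = 100 and a bbox Y range of
 * [10, 20) in Mesa's bottom-up coordinates, the packed inclusive range is
 * YMin = 100 - 20 = 80 and YMax = 100 - 10 - 1 = 89, i.e. the same ten
 * rows addressed from the top of the surface.
 */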
#if GEN_GEN >= 6
static void
genX(upload_scissor_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const bool flip_y = ctx->DrawBuffer->FlipY;
   struct GENX(SCISSOR_RECT) scissor;
   uint32_t scissor_state_offset;
   const unsigned int fb_width = _mesa_geometric_width(ctx->DrawBuffer);
   const unsigned int fb_height = _mesa_geometric_height(ctx->DrawBuffer);
   uint32_t *scissor_map;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   scissor_map = brw_state_batch(
      brw, GENX(SCISSOR_RECT_length) * sizeof(uint32_t) * viewport_count,
      32, &scissor_state_offset);

   /* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */

   /* The scissor only needs to handle the intersection of drawable and
    * scissor rect.  Clipping to the boundaries of static shared buffers
    * for front/back/depth is covered by looping over cliprects in brw_draw.c.
    *
    * Note that the hardware's coordinates are inclusive, while Mesa's min is
    * inclusive but max is exclusive.
    */
   for (unsigned i = 0; i < viewport_count; i++) {
      set_scissor_bits(ctx, i, flip_y, fb_width, fb_height, &scissor);
      GENX(SCISSOR_RECT_pack)(
         NULL, scissor_map + i * GENX(SCISSOR_RECT_length), &scissor);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
      ptr.ScissorRectPointer = scissor_state_offset;
   }
}
static const struct brw_tracked_state genX(scissor_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_SCISSOR |
              _NEW_VIEWPORT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_scissor_state),
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_sf_clip_viewport)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float y_scale, y_bias;

   /* BRW_NEW_VIEWPORT_COUNT */
   const unsigned viewport_count = brw->clip.viewport_count;

   /* _NEW_BUFFERS */
   const bool flip_y = ctx->DrawBuffer->FlipY;
   const uint32_t fb_width = (float)_mesa_geometric_width(ctx->DrawBuffer);
   const uint32_t fb_height = (float)_mesa_geometric_height(ctx->DrawBuffer);

#if GEN_GEN >= 7
#define clv sfv
   struct GENX(SF_CLIP_VIEWPORT) sfv;
   uint32_t sf_clip_vp_offset;
   uint32_t *sf_clip_map =
      brw_state_batch(brw, GENX(SF_CLIP_VIEWPORT_length) * 4 * viewport_count,
                      64, &sf_clip_vp_offset);
#else
   struct GENX(SF_VIEWPORT) sfv;
   struct GENX(CLIP_VIEWPORT) clv;
   uint32_t sf_vp_offset, clip_vp_offset;
   uint32_t *sf_map =
      brw_state_batch(brw, GENX(SF_VIEWPORT_length) * 4 * viewport_count,
                      32, &sf_vp_offset);
   uint32_t *clip_map =
      brw_state_batch(brw, GENX(CLIP_VIEWPORT_length) * 4 * viewport_count,
                      32, &clip_vp_offset);
#endif

   /* _NEW_BUFFERS */
   if (flip_y) {
      y_scale = -1.0;
      y_bias = (float)fb_height;
   } else {
      y_scale = 1.0;
      y_bias = 0;
   }

   for (unsigned i = 0; i < brw->clip.viewport_count; i++) {
      /* _NEW_VIEWPORT: Guardband Clipping */
      float scale[3], translate[3], gb_xmin, gb_xmax, gb_ymin, gb_ymax;
      _mesa_get_viewport_xform(ctx, i, scale, translate);

      sfv.ViewportMatrixElementm00 = scale[0];
      sfv.ViewportMatrixElementm11 = scale[1] * y_scale,
      sfv.ViewportMatrixElementm22 = scale[2],
      sfv.ViewportMatrixElementm30 = translate[0],
      sfv.ViewportMatrixElementm31 = translate[1] * y_scale + y_bias,
      sfv.ViewportMatrixElementm32 = translate[2],
      gen_calculate_guardband_size(fb_width, fb_height,
                                   sfv.ViewportMatrixElementm00,
                                   sfv.ViewportMatrixElementm11,
                                   sfv.ViewportMatrixElementm30,
                                   sfv.ViewportMatrixElementm31,
                                   &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

      clv.XMinClipGuardband = gb_xmin;
      clv.XMaxClipGuardband = gb_xmax;
      clv.YMinClipGuardband = gb_ymin;
      clv.YMaxClipGuardband = gb_ymax;

#if GEN_GEN < 6
      set_scissor_bits(ctx, i, flip_y, fb_width, fb_height,
                       &sfv.ScissorRectangle);
#elif GEN_GEN >= 8
      /* _NEW_VIEWPORT | _NEW_BUFFERS: Screen Space Viewport
       * The hardware will take the intersection of the drawing rectangle,
       * scissor rectangle, and the viewport extents.  However, emitting
       * 3DSTATE_DRAWING_RECTANGLE is expensive since it requires a full
       * pipeline stall so we're better off just being a little more clever
       * with our viewport so we can emit it once at context creation time.
       */
      const float viewport_Xmin = MAX2(ctx->ViewportArray[i].X, 0);
      const float viewport_Ymin = MAX2(ctx->ViewportArray[i].Y, 0);
      const float viewport_Xmax =
         MIN2(ctx->ViewportArray[i].X + ctx->ViewportArray[i].Width, fb_width);
      const float viewport_Ymax =
         MIN2(ctx->ViewportArray[i].Y + ctx->ViewportArray[i].Height, fb_height);

      if (flip_y) {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = fb_height - viewport_Ymax;
         sfv.YMaxViewPort = fb_height - viewport_Ymin - 1;
      } else {
         sfv.XMinViewPort = viewport_Xmin;
         sfv.XMaxViewPort = viewport_Xmax - 1;
         sfv.YMinViewPort = viewport_Ymin;
         sfv.YMaxViewPort = viewport_Ymax - 1;
      }
#endif

#if GEN_GEN >= 7
      GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_map, &sfv);
      sf_clip_map += GENX(SF_CLIP_VIEWPORT_length);
#else
      GENX(SF_VIEWPORT_pack)(NULL, sf_map, &sfv);
      GENX(CLIP_VIEWPORT_pack)(NULL, clip_map, &clv);
      sf_map += GENX(SF_VIEWPORT_length);
      clip_map += GENX(CLIP_VIEWPORT_length);
#endif
   }

#if GEN_GEN >= 7
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
      ptr.SFClipViewportPointer = sf_clip_vp_offset;
   }
#elif GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_VIEWPORT_STATE_POINTERS), vp) {
      vp.SFViewportStateChange = 1;
      vp.CLIPViewportStateChange = 1;
      vp.PointertoCLIP_VIEWPORT = clip_vp_offset;
      vp.PointertoSF_VIEWPORT = sf_vp_offset;
   }
#else
   brw->sf.vp_offset = sf_vp_offset;
   brw->clip.vp_offset = clip_vp_offset;
   brw->ctx.NewDriverState |= BRW_NEW_SF_VP | BRW_NEW_CLIP_VP;
#endif
}
static const struct brw_tracked_state genX(sf_clip_viewport) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_VIEWPORT |
              (GEN_GEN <= 5 ? _NEW_SCISSOR : 0),
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VIEWPORT_COUNT,
   },
   .emit = genX(upload_sf_clip_viewport),
};
/* ---------------------------------------------------------------------- */

static void
genX(upload_gs_state)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->gs.base;
   const struct gl_program *gs_prog = brw->programs[MESA_SHADER_GEOMETRY];
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool active = GEN_GEN >= 6 && gs_prog;

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   UNUSED const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);
#if GEN_GEN >= 7
   const struct brw_gs_prog_data *gs_prog_data =
      brw_gs_prog_data(stage_prog_data);
#endif

#if GEN_GEN == 6
   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_GS), cgs) {
      if (active && stage_state->push_const_size != 0) {
         cgs.Buffer0Valid = true;
         cgs.ConstantBody.PointertoConstantBuffer0 = stage_state->push_const_offset;
         cgs.ConstantBody.ConstantBuffer0ReadLength = stage_state->push_const_size - 1;
      }
   }
#endif

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /**
    * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
    * Geometry > Geometry Shader > State:
    *
    *     "Note: Because of corruption in IVB:GT2, software needs to flush the
    *     whole fixed function pipeline when the GS enable changes value in
    *     the 3DSTATE_GS."
    *
    * The hardware architects have clarified that in this context "flush the
    * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
    * Stall" bit set.
    */
   if (devinfo->gt == 2 && brw->gs.enabled != active)
      gen7_emit_cs_stall_flush(brw);
#endif

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_GS), gs) {
#else
   ctx->NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
   brw_state_emit(brw, GENX(GS_STATE), 32, &brw->ff_gs.state_offset, gs) {
#endif

#if GEN_GEN >= 6
      if (active) {
         INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);

#if GEN_GEN >= 7
         gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
         gs.OutputTopology = gs_prog_data->output_topology;
         gs.ControlDataHeaderSize =
            gs_prog_data->control_data_header_size_hwords;

         gs.InstanceControl = gs_prog_data->invocations - 1;
         gs.DispatchMode = vue_prog_data->dispatch_mode;

         gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;

         gs.ControlDataFormat = gs_prog_data->control_data_format;
#endif

         /* Note: the meaning of the GEN7_GS_REORDER_TRAILING bit changes between
          * Ivy Bridge and Haswell.
          *
          * On Ivy Bridge, setting this bit causes the vertices of a triangle
          * strip to be delivered to the geometry shader in an order that does
          * not strictly follow the OpenGL spec, but preserves triangle
          * orientation.  For example, if the vertices are (1, 2, 3, 4, 5), then
          * the geometry shader sees triangles:
          *
          * (1, 2, 3), (2, 4, 3), (3, 4, 5)
          *
          * (Clearing the bit is even worse, because it fails to preserve
          * orientation).
          *
          * Triangle strips with adjacency always ordered in a way that preserves
          * triangle orientation but does not strictly follow the OpenGL spec,
          * regardless of the setting of this bit.
          *
          * On Haswell, both triangle strips and triangle strips with adjacency
          * are always ordered in a way that preserves triangle orientation.
          * Setting this bit causes the ordering to strictly follow the OpenGL
          * spec.
          *
          * So in either case we want to set the bit.  Unfortunately on Ivy
          * Bridge this will get the order close to correct but not perfect.
          */
         gs.ReorderMode = TRAILING;
         gs.MaximumNumberofThreads =
            GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                         : (devinfo->max_gs_threads - 1);

#if GEN_GEN < 7
         gs.SOStatisticsEnable = true;
         if (gs_prog->info.has_transform_feedback_varyings)
            gs.SVBIPayloadEnable = _mesa_is_xfb_active_and_unpaused(ctx);

         /* GEN6_GS_SPF_MODE and GEN6_GS_VECTOR_MASK_ENABLE are enabled as it
          * was previously done for gen6.
          *
          * TODO: test with both disabled to see if the HW is behaving
          *       as expected, like in gen7.
          */
         gs.SingleProgramFlow = true;
         gs.VectorMaskEnable = true;
#endif

#if GEN_GEN >= 8
         gs.ExpectedVertexCount = gs_prog_data->vertices_in;

         if (gs_prog_data->static_vertex_count != -1) {
            gs.StaticOutput = true;
            gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
         }
         gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

         gs.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;

         const int urb_entry_write_offset = 1;
         const uint32_t urb_entry_output_length =
            DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
            urb_entry_write_offset;

         gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
         gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
#endif
      }
#endif

#if GEN_GEN <= 6
      if (!active && brw->ff_gs.prog_active) {
         /* In gen6, transform feedback for the VS stage is done with an
          * ad-hoc GS program. This function provides the needed 3DSTATE_GS
          * for this.
          */
         gs.KernelStartPointer = KSP(brw, brw->ff_gs.prog_offset);
         gs.SingleProgramFlow = true;
         gs.DispatchGRFStartRegisterForURBData = GEN_GEN == 6 ? 2 : 1;
         gs.VertexURBEntryReadLength = brw->ff_gs.prog_data->urb_read_length;

#if GEN_GEN <= 5
         gs.GRFRegisterCount =
            DIV_ROUND_UP(brw->ff_gs.prog_data->total_grf, 16) - 1;
         /* BRW_NEW_URB_FENCE */
         gs.NumberofURBEntries = brw->urb.nr_gs_entries;
         gs.URBEntryAllocationSize = brw->urb.vsize - 1;
         gs.MaximumNumberofThreads = brw->urb.nr_gs_entries >= 8 ? 1 : 0;
         gs.FloatingPointMode = FLOATING_POINT_MODE_Alternate;
#else
         gs.Enable = true;
         gs.VectorMaskEnable = true;
         gs.SVBIPayloadEnable = true;
         gs.SVBIPostIncrementEnable = true;
         gs.SVBIPostIncrementValue =
            brw->ff_gs.prog_data->svbi_postincrement_value;
         gs.SOStatisticsEnable = true;
         gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
#endif
      }
#endif
      if (!active && !brw->ff_gs.prog_active) {
#if GEN_GEN < 8
         gs.DispatchGRFStartRegisterForURBData = 1;
#if GEN_GEN >= 7
         gs.IncludeVertexHandles = true;
#endif
#endif
      }

#if GEN_GEN >= 6
      gs.StatisticsEnable = true;
#endif
#if GEN_GEN == 5 || GEN_GEN == 6
      gs.RenderingEnabled = true;
#endif
#if GEN_GEN < 8
      gs.MaximumVPIndex = brw->clip.viewport_count - 1;
#endif
   }

   brw->gs.enabled = active;
}
static const struct brw_tracked_state genX(gs_state) = {
   .dirty = {
      .mesa  = (GEN_GEN == 6 ? _NEW_PROGRAM_CONSTANTS : 0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               (GEN_GEN <= 5 ? BRW_NEW_PUSH_CONSTANT_ALLOCATION |
                               BRW_NEW_PROGRAM_CACHE |
                               BRW_NEW_URB_FENCE |
                               BRW_NEW_VIEWPORT_COUNT
                             : 0) |
               (GEN_GEN >= 6 ? BRW_NEW_CONTEXT |
                               BRW_NEW_GEOMETRY_PROGRAM |
                               BRW_NEW_GS_PROG_DATA
                             : 0) |
               (GEN_GEN < 7 ? BRW_NEW_FF_GS_PROG_DATA : 0),
   },
   .emit = genX(upload_gs_state),
};
/* ---------------------------------------------------------------------- */

UNUSED static GLenum
fix_dual_blend_alpha_to_one(GLenum function)
{
   switch (function) {
   case GL_SRC1_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_SRC1_ALPHA:
      return GL_ZERO;
   }

   return function;
}

#define blend_factor(x) brw_translate_blend_factor(x)
#define blend_eqn(x) brw_translate_blend_equation(x)

/**
 * Modify blend function to force destination alpha to 1.0
 *
 * If \c function specifies a blend function that uses destination alpha,
 * replace it with a function that hard-wires destination alpha to 1.0.  This
 * is used when rendering to xRGB targets.
 */
static GLenum
brw_fix_xRGB_alpha(GLenum function)
{
   switch (function) {
   case GL_DST_ALPHA:
      return GL_ONE;

   case GL_ONE_MINUS_DST_ALPHA:
   case GL_SRC_ALPHA_SATURATE:
      return GL_ZERO;
   }

   return function;
}

#if GEN_GEN >= 6
typedef struct GENX(BLEND_STATE_ENTRY) BLEND_ENTRY_GENXML;
#else
typedef struct GENX(COLOR_CALC_STATE) BLEND_ENTRY_GENXML;
#endif
static bool
set_blend_entry_bits(struct brw_context *brw, BLEND_ENTRY_GENXML *entry, int i,
                     bool alpha_to_one)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];

   bool independent_alpha_blend = false;

   /* Used for implementing the following bit of GL_EXT_texture_integer:
    * "Per-fragment operations that require floating-point color
    *  components, including multisample alpha operations, alpha test,
    *  blending, and dithering, have no effect when the corresponding
    *  colors are written to an integer color buffer."
    */
   const bool integer = ctx->DrawBuffer->_IntegerBuffers & (0x1 << i);

   const unsigned blend_enabled = GEN_GEN >= 6 ?
      ctx->Color.BlendEnabled & (1 << i) : ctx->Color.BlendEnabled;

   /* _NEW_COLOR */
   if (ctx->Color.ColorLogicOpEnabled) {
      GLenum rb_type = rb ? _mesa_get_format_datatype(rb->Format)
                          : GL_UNSIGNED_NORMALIZED;
      WARN_ONCE(ctx->Color.LogicOp != GL_COPY &&
                rb_type != GL_UNSIGNED_NORMALIZED &&
                rb_type != GL_FLOAT, "Ignoring %s logic op on %s "
                "renderbuffer",
                _mesa_enum_to_string(ctx->Color.LogicOp),
                _mesa_enum_to_string(rb_type));
      if (GEN_GEN >= 8 || rb_type == GL_UNSIGNED_NORMALIZED) {
         entry->LogicOpEnable = true;
         entry->LogicOpFunction = ctx->Color._LogicOp;
      }
   } else if (blend_enabled && !ctx->Color._AdvancedBlendMode
              && (GEN_GEN <= 5 || !integer)) {
      GLenum eqRGB = ctx->Color.Blend[i].EquationRGB;
      GLenum eqA = ctx->Color.Blend[i].EquationA;
      GLenum srcRGB = ctx->Color.Blend[i].SrcRGB;
      GLenum dstRGB = ctx->Color.Blend[i].DstRGB;
      GLenum srcA = ctx->Color.Blend[i].SrcA;
      GLenum dstA = ctx->Color.Blend[i].DstA;

      if (eqRGB == GL_MIN || eqRGB == GL_MAX)
         srcRGB = dstRGB = GL_ONE;

      if (eqA == GL_MIN || eqA == GL_MAX)
         srcA = dstA = GL_ONE;

      /* Due to hardware limitations, the destination may have information
       * in an alpha channel even when the format specifies no alpha
       * channel. In order to avoid getting any incorrect blending due to
       * that alpha channel, coerce the blend factors to values that will
       * not read the alpha channel, but will instead use the correct
       * implicit value for alpha.
       */
      if (rb && !_mesa_base_format_has_channel(rb->_BaseFormat,
                                               GL_TEXTURE_ALPHA_TYPE)) {
         srcRGB = brw_fix_xRGB_alpha(srcRGB);
         srcA = brw_fix_xRGB_alpha(srcA);
         dstRGB = brw_fix_xRGB_alpha(dstRGB);
         dstA = brw_fix_xRGB_alpha(dstA);
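         /* Illustrative example (added for clarity, not from the original
          * source): for an xRGB renderbuffer, GL_DST_ALPHA becomes GL_ONE
          * and GL_ONE_MINUS_DST_ALPHA becomes GL_ZERO, matching the
          * implicit destination alpha of 1.0.
          */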
      }

      /* From the BLEND_STATE docs, DWord 0, Bit 29 (AlphaToOne Enable):
       * "If Dual Source Blending is enabled, this bit must be disabled."
       *
       * We override SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO,
       * and leave it enabled anyway.
       */
      if (GEN_GEN >= 6 && ctx->Color.Blend[i]._UsesDualSrc && alpha_to_one) {
         srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
         srcA = fix_dual_blend_alpha_to_one(srcA);
         dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
         dstA = fix_dual_blend_alpha_to_one(dstA);
      }

      /* BRW_NEW_FS_PROG_DATA */
      const struct brw_wm_prog_data *wm_prog_data =
         brw_wm_prog_data(brw->wm.base.prog_data);

      /* The Dual Source Blending documentation says:
       *
       * "If SRC1 is included in a src/dst blend factor and
       * a DualSource RT Write message is not used, results
       * are UNDEFINED. (This reflects the same restriction in DX APIs,
       * where undefined results are produced if “o1” is not written
       * by a PS – there are no default values defined).
       * If SRC1 is not included in a src/dst blend factor,
       * dual source blending must be disabled."
       *
       * There is no way to gracefully fix this undefined situation
       * so we just disable the blending to prevent possible issues.
       */
      entry->ColorBufferBlendEnable =
         !ctx->Color.Blend[0]._UsesDualSrc || wm_prog_data->dual_src_blend;

      entry->DestinationBlendFactor = blend_factor(dstRGB);
      entry->SourceBlendFactor = blend_factor(srcRGB);
      entry->DestinationAlphaBlendFactor = blend_factor(dstA);
      entry->SourceAlphaBlendFactor = blend_factor(srcA);
      entry->ColorBlendFunction = blend_eqn(eqRGB);
      entry->AlphaBlendFunction = blend_eqn(eqA);

      if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB)
         independent_alpha_blend = true;
   }

   return independent_alpha_blend;
}
#if GEN_GEN >= 6
static void
genX(upload_blend_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   int size;

   /* We need at least one BLEND_STATE written, because we might do
    * thread dispatch even if _NumColorDrawBuffers is 0 (for example
    * for computed depth or alpha test), which will do an FB write
    * with render target 0, which will reference BLEND_STATE[0] for
    * alpha test enable.
    */
   int nr_draw_buffers = ctx->DrawBuffer->_NumColorDrawBuffers;
   if (nr_draw_buffers == 0 && ctx->Color.AlphaEnabled)
      nr_draw_buffers = 1;

   size = GENX(BLEND_STATE_ENTRY_length) * 4 * nr_draw_buffers;
#if GEN_GEN >= 8
   size += GENX(BLEND_STATE_length) * 4;
#endif

   uint32_t *blend_map;
   blend_map = brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);

#if GEN_GEN >= 8
   struct GENX(BLEND_STATE) blend = { 0 };
   {
#else
   for (int i = 0; i < nr_draw_buffers; i++) {
      struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#define blend entry
#endif
      /* OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       * integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       * operations are skipped."
       */
      if (!(ctx->DrawBuffer->_IntegerBuffers & 0x1)) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            if (ctx->Multisample.SampleAlphaToCoverage) {
               blend.AlphaToCoverageEnable = true;
               blend.AlphaToCoverageDitherEnable = GEN_GEN >= 7;
            }
            if (ctx->Multisample.SampleAlphaToOne)
               blend.AlphaToOneEnable = true;
         }

         /* _NEW_COLOR */
         if (ctx->Color.AlphaEnabled) {
            blend.AlphaTestEnable = true;
            blend.AlphaTestFunction =
               intel_translate_compare_func(ctx->Color.AlphaFunc);
         }

         if (ctx->Color.DitherFlag) {
            blend.ColorDitherEnable = true;
         }
      }

#if GEN_GEN >= 8
      for (int i = 0; i < nr_draw_buffers; i++) {
         struct GENX(BLEND_STATE_ENTRY) entry = { 0 };
#endif
         blend.IndependentAlphaBlendEnable =
            set_blend_entry_bits(brw, &entry, i, blend.AlphaToOneEnable) ||
            blend.IndependentAlphaBlendEnable;

         /* See section 8.1.6 "Pre-Blend Color Clamping" of the
          * SandyBridge PRM Volume 2 Part 1 for HW requirements.
          *
          * We do our ARB_color_buffer_float CLAMP_FRAGMENT_COLOR
          * clamping in the fragment shader.  For its clamping of
          * blending, the spec says:
          *
          *     "RESOLVED: For fixed-point color buffers, the inputs and
          *      the result of the blending equation are clamped.  For
          *      floating-point color buffers, no clamping occurs."
          *
          * So, generally, we want clamping to the render target's range.
          * And, good news, the hardware tables for both pre- and
          * post-blend color clamping are either ignored, or any are
          * allowed, or clamping is required but RT range clamping is a
          * valid option.
          */
         entry.PreBlendColorClampEnable = true;
         entry.PostBlendColorClampEnable = true;
         entry.ColorClampRange = COLORCLAMP_RTFORMAT;

         entry.WriteDisableRed   = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 0);
         entry.WriteDisableGreen = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 1);
         entry.WriteDisableBlue  = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 2);
         entry.WriteDisableAlpha = !GET_COLORMASK_BIT(ctx->Color.ColorMask, i, 3);

#if GEN_GEN >= 8
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[1 + i * 2], &entry);
#else
         GENX(BLEND_STATE_ENTRY_pack)(NULL, &blend_map[i * 2], &entry);
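         /* Note on the indexing above (added for clarity, not from the
          * original source): on Gen8+ the BLEND_STATE header occupies
          * dword 0 of the map, so entry i is packed at dword 1 + i * 2;
          * before Gen8 there is no header and entries start at dword 0.
          */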
#endif
      }
   }

#if GEN_GEN >= 8
   GENX(BLEND_STATE_pack)(NULL, blend_map, &blend);
#endif
#if GEN_GEN < 8
#undef blend
#endif

#if GEN_GEN < 7
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.PointertoBLEND_STATE = brw->cc.blend_state_offset;
      ptr.BLEND_STATEChange = true;
   }
#else
   brw_batch_emit(brw, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
      ptr.BlendStatePointer = brw->cc.blend_state_offset;
#if GEN_GEN >= 8
      ptr.BlendStatePointerValid = true;
#endif
   }
#endif
}
static const struct brw_tracked_state genX(blend_state) = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_MULTISAMPLE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_STATE_BASE_ADDRESS,
   },
   .emit = genX(upload_blend_state),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 7
UNUSED static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};
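/* Note (added for clarity, not in the original source): the upload loop
 * below always emits the GENX(3DSTATE_CONSTANT_VS) template and then
 * patches _3DCommandSubOpcode with the value from this table, so a single
 * packet template serves the VS, HS, DS, GS and PS stages.
 */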
static void
genX(upload_push_constant_packets)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;

   UNUSED uint32_t mocs = GEN_GEN < 8 ? GEN7_MOCS_L3 : 0;

   struct brw_stage_state *stage_states[] = {
      &brw->vs.base,
      &brw->tcs.base,
      &brw->tes.base,
      &brw->gs.base,
      &brw->wm.base,
   };

   if (GEN_GEN == 7 && !GEN_IS_HASWELL && !devinfo->is_baytrail &&
       stage_states[MESA_SHADER_VERTEX]->push_constants_dirty)
      gen7_emit_vs_workaround_flush(brw);

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct brw_stage_state *stage_state = stage_states[stage];
      UNUSED struct gl_program *prog = ctx->_Shader->CurrentProgram[stage];

      if (!stage_state->push_constants_dirty)
         continue;

      brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_VS), pkt) {
         pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
         if (stage_state->prog_data) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
            /* The Skylake PRM contains the following restriction:
             *
             *    "The driver must ensure The following case does not occur
             *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
             *     buffer 3 read length equal to zero committed followed by a
             *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
             *     zero committed."
             *
             * To avoid this, we program the buffers in the highest slots.
             * This way, slot 0 is only used if slot 3 is also used.
             */
            int n = 3;

            for (int i = 3; i >= 0; i--) {
               const struct brw_ubo_range *range =
                  &stage_state->prog_data->ubo_ranges[i];

               if (range->length == 0)
                  continue;

               const struct gl_uniform_block *block =
                  prog->sh.UniformBlocks[range->block];
               const struct gl_buffer_binding *binding =
                  &ctx->UniformBufferBindings[block->Binding];

               if (binding->BufferObject == ctx->Shared->NullBufferObj) {
                  static unsigned msg_id = 0;
                  _mesa_gl_debugf(ctx, &msg_id, MESA_DEBUG_SOURCE_API,
                                  MESA_DEBUG_TYPE_UNDEFINED,
                                  MESA_DEBUG_SEVERITY_HIGH,
                                  "UBO %d unbound, %s shader uniform data "
                                  "will be undefined.",
                                  range->block,
                                  _mesa_shader_stage_to_string(stage));
                  continue;
               }

               assert(binding->Offset % 32 == 0);

               struct brw_bo *bo = intel_bufferobj_buffer(brw,
                  intel_buffer_object(binding->BufferObject),
                  binding->Offset, range->length * 32, false);

               pkt.ConstantBody.ReadLength[n] = range->length;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(bo, range->start * 32 + binding->Offset);
               n--;
            }

            if (stage_state->push_const_size > 0) {
               assert(n >= 0);
               pkt.ConstantBody.ReadLength[n] = stage_state->push_const_size;
               pkt.ConstantBody.Buffer[n] =
                  ro_bo(stage_state->push_const_bo,
                        stage_state->push_const_offset);
            }
#else
            pkt.ConstantBody.ReadLength[0] = stage_state->push_const_size;
            pkt.ConstantBody.Buffer[0].offset =
               stage_state->push_const_offset | mocs;
#endif
         }
      }

      stage_state->push_constants_dirty = false;
      brw->ctx.NewDriverState |= GEN_GEN >= 9 ? BRW_NEW_SURFACES : 0;
   }
}
const struct brw_tracked_state genX(push_constant_packets) = {
   .dirty = {
      .mesa = 0,
      .brw  = BRW_NEW_DRAW_CALL,
   },
   .emit = genX(upload_push_constant_packets),
};
#endif
#if GEN_GEN >= 6
static void
genX(upload_vs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_program *vp = brw->programs[MESA_SHADER_VERTEX];
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;

   gen6_upload_push_constants(brw, vp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(vs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VERTEX_PROGRAM |
               BRW_NEW_VS_PROG_DATA,
   },
   .emit = genX(upload_vs_push_constants),
};
static void
genX(upload_gs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->gs.base;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   const struct gl_program *gp = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_GS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->gs.base.prog_data;

   gen6_upload_push_constants(brw, gp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(gs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM |
               BRW_NEW_GS_PROG_DATA,
   },
   .emit = genX(upload_gs_push_constants),
};
static void
genX(upload_wm_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   gen6_upload_push_constants(brw, fp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(wm_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_wm_push_constants),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 6
static unsigned
genX(determine_sample_mask)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   float coverage = 1.0f;
   float coverage_invert = false;
   unsigned sample_mask = ~0u;

   /* BRW_NEW_NUM_SAMPLES */
   unsigned num_samples = brw->num_samples;

   if (_mesa_is_multisample_enabled(ctx)) {
      if (ctx->Multisample.SampleCoverage) {
         coverage = ctx->Multisample.SampleCoverageValue;
         coverage_invert = ctx->Multisample.SampleCoverageInvert;
      }
      if (ctx->Multisample.SampleMask) {
         sample_mask = ctx->Multisample.SampleMaskValue;
      }
   }

   if (num_samples > 1) {
      int coverage_int = (int) (num_samples * coverage + 0.5f);
      uint32_t coverage_bits = (1 << coverage_int) - 1;
      if (coverage_invert)
         coverage_bits ^= (1 << num_samples) - 1;
      return coverage_bits & sample_mask;
   } else {
      return 1;
   }
}
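/* Worked example for genX(determine_sample_mask) above (added for
 * illustration, not from the original source): with num_samples = 4 and
 * SampleCoverageValue = 0.5, coverage_int = 2 and coverage_bits = 0b0011,
 * so only samples 0 and 1 are lit; with SampleCoverageInvert set this
 * flips to 0b1100.
 */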
static void
genX(emit_3dstate_multisample2)(struct brw_context *brw,
                                unsigned num_samples)
{
   unsigned log2_samples = ffs(num_samples) - 1;
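   /* Note (added for clarity, not from the original source): sample counts
    * here are powers of two, so ffs(num_samples) - 1 is log2(num_samples),
    * e.g. ffs(8) - 1 = 3.
    */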
   brw_batch_emit(brw, GENX(3DSTATE_MULTISAMPLE), multi) {
      multi.PixelLocation = CENTER;
      multi.NumberofMultisamples = log2_samples;

#if GEN_GEN == 6
      GEN_SAMPLE_POS_4X(multi.Sample);
#elif GEN_GEN == 7
      switch (num_samples) {
      case 1:
         GEN_SAMPLE_POS_1X(multi.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(multi.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(multi.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(multi.Sample);
         break;
      default:
         break;
      }
#endif
   }
}
static void
genX(upload_multisample_state)(struct brw_context *brw)
{
   assert(brw->num_samples > 0 && brw->num_samples <= 16);

   genX(emit_3dstate_multisample2)(brw, brw->num_samples);

   /* 3DSTATE_SAMPLE_MASK */
   brw_batch_emit(brw, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = genX(determine_sample_mask)(brw);
   }
}
static const struct brw_tracked_state genX(multisample_state) = {
   .dirty = {
      .mesa = _NEW_MULTISAMPLE |
              (GEN_GEN == 10 ? _NEW_BUFFERS : 0),
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT |
             BRW_NEW_NUM_SAMPLES,
   },
   .emit = genX(upload_multisample_state)
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_color_calc_state)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_state_emit(brw, GENX(COLOR_CALC_STATE), 64, &brw->cc.state_offset, cc) {
#if GEN_GEN <= 5
      cc.IndependentAlphaBlendEnable =
         set_blend_entry_bits(brw, &cc, 0, false);
      set_depth_stencil_bits(brw, &cc);

      if (ctx->Color.AlphaEnabled &&
          ctx->DrawBuffer->_NumColorDrawBuffers <= 1) {
         cc.AlphaTestEnable = true;
         cc.AlphaTestFunction =
            intel_translate_compare_func(ctx->Color.AlphaFunc);
      }

      cc.ColorDitherEnable = ctx->Color.DitherFlag;

      /* BRW_NEW_STATS_WM */
      cc.StatisticsEnable = brw->stats_wm;

      cc.CCViewportStatePointer =
         ro_bo(brw->batch.state.bo, brw->cc.vp_offset);
#else
      /* _NEW_COLOR */
      cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];

      /* _NEW_STENCIL */
      cc.StencilReferenceValue = _mesa_get_stencil_ref(ctx, 0);
      cc.BackfaceStencilReferenceValue =
         _mesa_get_stencil_ref(ctx, ctx->Stencil._BackFace);
#endif

      /* _NEW_COLOR */
      UNCLAMPED_FLOAT_TO_UBYTE(cc.AlphaReferenceValueAsUNORM8,
                               ctx->Color.AlphaRef);
   }

#if GEN_GEN >= 6
   brw_batch_emit(brw, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
      ptr.ColorCalcStatePointer = brw->cc.state_offset;
#if GEN_GEN != 7
      ptr.ColorCalcStatePointerValid = true;
#endif
   }
#else
   brw->ctx.NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
#endif
}
genX(color_calc_state
) = {
3417 .mesa
= _NEW_COLOR
|
3419 (GEN_GEN
<= 5 ? _NEW_BUFFERS
|
3422 .brw
= BRW_NEW_BATCH
|
3424 (GEN_GEN
<= 5 ? BRW_NEW_CC_VP
|
3426 : BRW_NEW_CC_STATE
|
3427 BRW_NEW_STATE_BASE_ADDRESS
),
3429 .emit
= genX(upload_color_calc_state
),
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 7
static void
genX(upload_sbe)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   UNUSED const struct gl_program *fp = brw->programs[MESA_SHADER_FRAGMENT];
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
#if GEN_GEN >= 8
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = { { 0 } };
#else
#define attr_overrides sbe.Attribute
#endif
   uint32_t urb_entry_read_length;
   uint32_t urb_entry_read_offset;
   uint32_t point_sprite_enables;

   brw_batch_emit(brw, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;

      /* _NEW_BUFFERS */
      bool flip_y = ctx->DrawBuffer->FlipY;

      /* _NEW_POINT
       *
       * Window coordinates in an FBO are inverted, which means point
       * sprite origin must be inverted.
       */
      if ((ctx->Point.SpriteOrigin == GL_LOWER_LEFT) == flip_y)
         sbe.PointSpriteTextureCoordinateOrigin = LOWERLEFT;
      else
         sbe.PointSpriteTextureCoordinateOrigin = UPPERLEFT;

      /* _NEW_POINT | _NEW_LIGHT | _NEW_PROGRAM,
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM |
       * BRW_NEW_GS_PROG_DATA | BRW_NEW_PRIMITIVE | BRW_NEW_TES_PROG_DATA |
       * BRW_NEW_VUE_MAP_GEOM_OUT
       */
      genX(calculate_attr_overrides)(brw,
                                     attr_overrides,
                                     &point_sprite_enables,
                                     &urb_entry_read_length,
                                     &urb_entry_read_offset);

      /* Typically, the URB entry read length and offset should be programmed
       * in 3DSTATE_VS and 3DSTATE_GS; SBE inherits it from the last active
       * stage which produces geometry.  However, we don't know the proper
       * value until we call calculate_attr_overrides().
       *
       * To fit with our existing code, we override the inherited values and
       * specify it here directly, as we did on previous generations.
       */
      sbe.VertexURBEntryReadLength = urb_entry_read_length;
      sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
      sbe.PointSpriteTextureCoordinateEnable = point_sprite_enables;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;

#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      /* prepare the active component dwords */
      for (int i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
#endif
   }

#if GEN_GEN >= 8
   brw_batch_emit(brw, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
#endif

#undef attr_overrides
}

static const struct brw_tracked_state genX(sbe_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LIGHT |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_PROGRAM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_GS_PROG_DATA |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               (GEN_GEN == 7 ? BRW_NEW_PRIMITIVE
                             : 0),
   },
   .emit = genX(upload_sbe),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 7
/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries containing a SO_DECL per
 * stream.  We only have one stream of rendering coming out of the GS unit, so
 * we only emit stream 0 (low 16 bits) SO_DECLs.
 */
static void
genX(upload_3dstate_so_decl_list)(struct brw_context *brw,
                                  const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < linked_xfb_info->NumOutputs; i++) {
      const struct gl_transform_feedback_output *output =
         &linked_xfb_info->Outputs[i];
      const int buffer = output->OutputBuffer;
      const int varying = output->OutputRegister;
      const unsigned stream_id = output->StreamId;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->DstOffset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->OutputBuffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }
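      /* Worked example (added for illustration, not from the original
       * source): skipping 6 components emits two holes, a size-4 hole with
       * ComponentMask 0xF followed by a size-2 hole with mask 0x3.
       */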
      next_offset[buffer] = output->DstOffset + output->NumComponents;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->OutputBuffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->NumComponents) - 1) << output->ComponentOffset,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   uint32_t *dw;
   dw = brw_batch_emitn(brw, GENX(3DSTATE_SO_DECL_LIST), 3 + 2 * max_decls,
                        .StreamtoBufferSelects0 = buffer_mask[0],
                        .StreamtoBufferSelects1 = buffer_mask[1],
                        .StreamtoBufferSelects2 = buffer_mask[2],
                        .StreamtoBufferSelects3 = buffer_mask[3],
                        .NumEntries0 = decls[0],
                        .NumEntries1 = decls[1],
                        .NumEntries2 = decls[2],
                        .NumEntries3 = decls[3]);

   for (int i = 0; i < max_decls; i++) {
      GENX(SO_DECL_ENTRY_pack)(
         brw, dw + 2 + i * 2,
         &(struct GENX(SO_DECL_ENTRY)) {
            .Stream0Decl = so_decl[0][i],
            .Stream1Decl = so_decl[1][i],
            .Stream2Decl = so_decl[2][i],
            .Stream3Decl = so_decl[3][i],
         });
   }
}
static void
genX(upload_3dstate_so_buffers)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
#if GEN_GEN < 8
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
#else
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) xfb_obj;
   uint32_t mocs_wb = GEN_GEN >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
#endif

   /* Set up the up to 4 output buffers.  These are the ranges defined in the
    * gl_transform_feedback_object.
    */
   for (int i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);
      uint32_t start = xfb_obj->Offset[i];
      uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
      uint32_t const size = end - start;

      if (!bufferobj || !size) {
         brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
            sob.SOBufferIndex = i;
         }
         continue;
      }

      assert(start % 4 == 0);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, bufferobj, start, size, true);
      assert(end <= bo->size);

      brw_batch_emit(brw, GENX(3DSTATE_SO_BUFFER), sob) {
         sob.SOBufferIndex = i;
#if GEN_GEN < 8
         sob.SurfaceBaseAddress = rw_bo(bo, start);
         sob.SurfacePitch = linked_xfb_info->Buffers[i].Stride * 4;
         sob.SurfaceEndAddress = rw_bo(bo, end);
#else
         sob.SOBufferEnable = true;
         sob.StreamOffsetWriteEnable = true;
         sob.StreamOutputBufferOffsetAddressEnable = true;
         sob.MOCS = mocs_wb;

         sob.SurfaceBaseAddress = rw_bo(bo, start);
         sob.SurfaceSize = MAX2(xfb_obj->Size[i] / 4, 1) - 1;
         sob.StreamOutputBufferOffsetAddress =
            rw_bo(brw_obj->offset_bo, i * sizeof(uint32_t));

         if (brw_obj->zero_offsets) {
            /* Zero out the offset and write that to offset_bo */
            sob.StreamOffset = 0;
         } else {
            /* Use offset_bo as the "Stream Offset." */
            sob.StreamOffset = 0xFFFFFFFF;
         }
#endif
      }
   }

#if GEN_GEN >= 8
   brw_obj->zero_offsets = false;
#endif
}
static bool
query_active(struct gl_query_object *q)
{
   return q && q->Active;
}

static void
genX(upload_3dstate_streamout)(struct brw_context *brw, bool active,
                               const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;

   brw_batch_emit(brw, GENX(3DSTATE_STREAMOUT), sos) {
      if (active) {
         int urb_entry_read_offset = 0;
         int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
            urb_entry_read_offset;
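         /* Note (added for clarity, not from the original source): each
          * URB read is 256 bits, i.e. two vec4 VUE slots, so a 9-slot VUE
          * map, for example, needs (9 + 1) / 2 = 5 reads per vertex.
          */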
         sos.SOFunctionEnable = true;
         sos.SOStatisticsEnable = true;

         /* BRW_NEW_RASTERIZER_DISCARD */
         if (ctx->RasterDiscard) {
            if (!query_active(ctx->Query.PrimitivesGenerated[0])) {
               sos.RenderingDisable = true;
            } else {
               perf_debug("Rasterizer discard with a GL_PRIMITIVES_GENERATED "
                          "query active relies on the clipper.\n");
            }
         }

         /* _NEW_LIGHT */
         if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
            sos.ReorderMode = TRAILING;

#if GEN_GEN < 8
         sos.SOBufferEnable0 = xfb_obj->Buffers[0] != NULL;
         sos.SOBufferEnable1 = xfb_obj->Buffers[1] != NULL;
         sos.SOBufferEnable2 = xfb_obj->Buffers[2] != NULL;
         sos.SOBufferEnable3 = xfb_obj->Buffers[3] != NULL;
#else
         const struct gl_transform_feedback_info *linked_xfb_info =
            xfb_obj->program->sh.LinkedTransformFeedback;
         /* Set buffer pitches; 0 means unbound. */
         if (xfb_obj->Buffers[0])
            sos.Buffer0SurfacePitch = linked_xfb_info->Buffers[0].Stride * 4;
         if (xfb_obj->Buffers[1])
            sos.Buffer1SurfacePitch = linked_xfb_info->Buffers[1].Stride * 4;
         if (xfb_obj->Buffers[2])
            sos.Buffer2SurfacePitch = linked_xfb_info->Buffers[2].Stride * 4;
         if (xfb_obj->Buffers[3])
            sos.Buffer3SurfacePitch = linked_xfb_info->Buffers[3].Stride * 4;
#endif

         /* We always read the whole vertex.  This could be reduced at some
          * point by reading less and offsetting the register index in the
          * SO_DECLs.
          */
         sos.Stream0VertexReadOffset = urb_entry_read_offset;
         sos.Stream0VertexReadLength = urb_entry_read_length - 1;
         sos.Stream1VertexReadOffset = urb_entry_read_offset;
         sos.Stream1VertexReadLength = urb_entry_read_length - 1;
         sos.Stream2VertexReadOffset = urb_entry_read_offset;
         sos.Stream2VertexReadLength = urb_entry_read_length - 1;
         sos.Stream3VertexReadOffset = urb_entry_read_offset;
         sos.Stream3VertexReadLength = urb_entry_read_length - 1;
      }
   }
}
static void
genX(upload_sol)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   bool active = _mesa_is_xfb_active_and_unpaused(ctx);

   if (active) {
      genX(upload_3dstate_so_buffers)(brw);

      /* BRW_NEW_VUE_MAP_GEOM_OUT */
      genX(upload_3dstate_so_decl_list)(brw, &brw->vue_map_geom_out);
   }

   /* Finally, set up the SOL stage.  This command must always follow updates to
    * the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST) or
    * MMIO register updates (current performed by the kernel at each batch
    * emit).
    */
   genX(upload_3dstate_streamout)(brw, active, &brw->vue_map_geom_out);
}

static const struct brw_tracked_state genX(sol_state) = {
   .dirty = {
      .mesa  = _NEW_LIGHT,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_RASTERIZER_DISCARD |
               BRW_NEW_VUE_MAP_GEOM_OUT |
               BRW_NEW_TRANSFORM_FEEDBACK,
   },
   .emit = genX(upload_sol),
};
#endif
/* ---------------------------------------------------------------------- */

static void
genX(upload_ps)(struct brw_context *brw)
{
   UNUSED const struct gl_context *ctx = &brw->ctx;
   UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   const struct brw_stage_state *stage_state = &brw->wm.base;

   brw_batch_emit(brw, GENX(3DSTATE_PS), ps) {
      /* Initialize the execution mask with VMask.  Otherwise, derivatives
       * are incorrect for subspans where some of the pixels are unlit.  We
       * believe the bit just didn't take effect in previous generations.
       */
      ps.VectorMaskEnable = GEN_GEN >= 8;

      /* Gen11 workaround:
       * "Incorrect TDL's SSP address shift in SARB for 16:6 & 18:8 modes.
       *  Disable the Sampler state prefetch functionality in the SARB by
       *  programming 0xB000[30] to '1'."
       */
      ps.SamplerCount = GEN_GEN == 11 ?
         0 : DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);

      /* BRW_NEW_FS_PROG_DATA */
      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests
       * disabling prefetching of binding tables in A0 and B0 steppings.
       * TODO: Revisit this workaround on C0 stepping.
       */
      ps.BindingTableEntryCount = GEN_GEN == 11 ?
                                  0 :
                                  prog_data->base.binding_table.size_bytes / 4;

      if (prog_data->base.use_alt_mode)
         ps.FloatingPointMode = Alternate;

#if GEN_IS_HASWELL
      /* Haswell requires the sample mask to be set in this packet as well
       * as in 3DSTATE_SAMPLE_MASK; the values should match.
       */

      /* _NEW_BUFFERS, _NEW_MULTISAMPLE */
      ps.SampleMask = genX(determine_sample_mask(brw));
#endif

      /* 3DSTATE_PS expects the number of threads per PSD, which is always
       * 64 for pre-Gen11 and 128 for Gen11+.  On Gen11+, if the programmed
       * value is k, it implies 2(k+1) threads.  It implicitly scales for
       * different GT levels (which have some number of PSDs).
       *
       * In Gen8 the format is U8-2 whereas in Gen9+ it is U9-1.
       */
#if GEN_GEN >= 9
      ps.MaximumNumberofThreadsPerPSD = 64 - 1;
#elif GEN_GEN >= 8
      ps.MaximumNumberofThreadsPerPSD = 64 - 2;
#else
      ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
#endif

      if (prog_data->base.nr_params > 0 ||
          prog_data->base.ubo_ranges[0].length > 0)
         ps.PushConstantEnable = true;

#if GEN_GEN < 8
      /* From the IVB PRM, volume 2 part 1, page 287:
       * "This bit is inserted in the PS payload header and made available to
       *  the DataPort (either via the message header or via header bypass)
       *  to indicate that oMask data (one or two phases) is included in
       *  Render Target Write messages."
       */
      ps.oMaskPresenttoRenderTarget = prog_data->uses_omask;

      /* The hardware wedges if you have this bit set but don't turn on any
       * dual source blend factors.
       *
       * BRW_NEW_FS_PROG_DATA | _NEW_COLOR
       */
      ps.DualSourceBlendEnable = prog_data->dual_src_blend &&
                                 (ctx->Color.BlendEnabled & 1) &&
                                 ctx->Color.Blend[0]._UsesDualSrc;

      /* BRW_NEW_FS_PROG_DATA */
      ps.AttributeEnable = (prog_data->num_varying_inputs != 0);
#endif

      /* From the documentation for this packet:
       * "If the PS kernel does not need the Position XY Offsets to
       *  compute a Position Value, then this field should be programmed
       *  to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       *  to compute a Position XY value, this field should match Position
       *  ZW Interpolation Mode to ensure a consistent position.xyzw
       *  computation."
       *
       * We only require XY sample offsets, so this recommendation doesn't
       * look useful at the moment.  We might need it in the future.
       */
      if (prog_data->uses_pos_offset)
         ps.PositionXYOffsetSelect = POSOFFSET_SAMPLE;
      else
         ps.PositionXYOffsetSelect = POSOFFSET_NONE;

      ps._8PixelDispatchEnable = prog_data->dispatch_8;
      ps._16PixelDispatchEnable = prog_data->dispatch_16;
      ps._32PixelDispatchEnable = prog_data->dispatch_32;

      /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
       *
       *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
       *     Dispatch must not be enabled for PER_PIXEL dispatch mode."
       *
       * Since 16x MSAA is first introduced on SKL, we don't need to apply
       * the workaround on any older hardware.
       *
       * BRW_NEW_NUM_SAMPLES
       */
      if (GEN_GEN >= 9 && !prog_data->persample_dispatch &&
          brw->num_samples == 16) {
         assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
         ps._32PixelDispatchEnable = false;
      }

      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 0);
      ps.DispatchGRFStartRegisterForConstantSetupData1 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 1);
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         brw_wm_prog_data_dispatch_grf_start_reg(prog_data, ps, 2);

      ps.KernelStartPointer0 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 0);
      ps.KernelStartPointer1 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 1);
      ps.KernelStartPointer2 = stage_state->prog_offset +
                               brw_wm_prog_data_prog_offset(prog_data, ps, 2);

      if (prog_data->base.total_scratch) {
         ps.ScratchSpaceBasePointer =
            rw_32_bo(stage_state->scratch_bo,
                     ffs(stage_state->per_thread_scratch) - 11);
      }
   }
}

static const struct brw_tracked_state genX(ps_state) = {
   .dirty = {
      .mesa  = _NEW_MULTISAMPLE |
               (GEN_GEN < 8 ? _NEW_BUFFERS |
                              _NEW_COLOR :
                              0),
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FS_PROG_DATA |
               (GEN_GEN >= 9 ? BRW_NEW_NUM_SAMPLES : 0),
   },
   .emit = genX(upload_ps),
};
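/* Worked example of the per-PSD thread-count encodings above: on Gen9-10 the
 * field is U9-1, so programming 64 - 1 = 63 requests the full 64 threads per
 * PSD.  Per the packet comment, on Gen11 a programmed value of k means
 * 2(k+1) threads, so the same value of 63 yields 2 * 64 = 128 threads,
 * matching the larger Gen11 PSD.
 */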
/* ---------------------------------------------------------------------- */

static void
genX(upload_hs_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_stage_state *stage_state = &brw->tcs.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   /* BRW_NEW_TCS_PROG_DATA */
   struct brw_tcs_prog_data *tcs_prog_data =
      brw_tcs_prog_data(stage_prog_data);

   if (!tcs_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs);
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_HS), hs) {
         INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);

         hs.InstanceCount = tcs_prog_data->instances - 1;
         hs.IncludeVertexHandles = true;

         hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;

#if GEN_GEN >= 9
         hs.DispatchMode = vue_prog_data->dispatch_mode;
         hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
#endif
      }
   }
}

static const struct brw_tracked_state genX(hs_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TCS_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_hs_state),
};

static void
genX(upload_ds_state)(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_stage_state *stage_state = &brw->tes.base;
   struct brw_stage_prog_data *stage_prog_data = stage_state->prog_data;

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(stage_prog_data);
   const struct brw_vue_prog_data *vue_prog_data =
      brw_vue_prog_data(stage_prog_data);

   if (!tes_prog_data) {
      brw_batch_emit(brw, GENX(3DSTATE_DS), ds);
   } else {
      assert(GEN_GEN < 11 ||
             vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8);

      brw_batch_emit(brw, GENX(3DSTATE_DS), ds) {
         INIT_THREAD_DISPATCH_FIELDS(ds, Patch);

         ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
         ds.ComputeWCoordinateEnable =
            tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

#if GEN_GEN >= 8
         if (vue_prog_data->dispatch_mode == DISPATCH_MODE_SIMD8)
            ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
         ds.UserClipDistanceCullTestEnableBitmask =
            vue_prog_data->cull_distance_mask;
#endif
      }
   }
}

static const struct brw_tracked_state genX(ds_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_ds_state),
};

/* ---------------------------------------------------------------------- */

static void
upload_te_state(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   bool active = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_tes_prog_data *tes_prog_data =
      brw_tes_prog_data(brw->tes.base.prog_data);

   if (active) {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te) {
         te.Partitioning = tes_prog_data->partitioning;
         te.OutputTopology = tes_prog_data->output_topology;
         te.TEDomain = tes_prog_data->domain;
         te.TEEnable = true;
         te.MaximumTessellationFactorOdd = 63.0;
         te.MaximumTessellationFactorNotOdd = 64.0;
      }
   } else {
      brw_batch_emit(brw, GENX(3DSTATE_TE), te);
   }
}

static const struct brw_tracked_state genX(te_state) = {
   .dirty = {
      .mesa  = 0,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_TES_PROG_DATA |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = upload_te_state,
};
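/* The maximum tessellation factors programmed above are the largest values
 * the tessellator can produce: 64.0 for even (not-odd) partitioning, and
 * 63.0 for odd partitioning, whose factors are odd numbers and therefore
 * top out one below 64.
 */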
/* ---------------------------------------------------------------------- */

static void
genX(upload_tes_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tes.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tep = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_TES_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tes.base.prog_data;
   gen6_upload_push_constants(brw, tep, prog_data, stage_state);
}

static const struct brw_tracked_state genX(tes_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TES_PROG_DATA,
   },
   .emit = genX(upload_tes_push_constants),
};

static void
genX(upload_tcs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->tcs.base;
   /* BRW_NEW_TESS_PROGRAMS */
   const struct gl_program *tcp = brw->programs[MESA_SHADER_TESS_CTRL];

   /* BRW_NEW_TCS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->tcs.base.prog_data;
   gen6_upload_push_constants(brw, tcp, prog_data, stage_state);
}

static const struct brw_tracked_state genX(tcs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_DEFAULT_TESS_LEVELS |
               BRW_NEW_TESS_PROGRAMS |
               BRW_NEW_TCS_PROG_DATA,
   },
   .emit = genX(upload_tcs_push_constants),
};

/* ---------------------------------------------------------------------- */

static void
genX(upload_cs_push_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      struct brw_cs_prog_data *cs_prog_data =
         brw_cs_prog_data(brw->cs.base.prog_data);

      _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
      brw_upload_cs_push_constants(brw, cp, cs_prog_data, stage_state);
   }
}

const struct brw_tracked_state genX(cs_push_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM |
               BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_push_constants),
};

/**
 * Creates a new CS constant buffer reflecting the current CS program's
 * constants, if needed by the CS program.
 */
static void
genX(upload_cs_pull_constants)(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->cs.base;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct brw_program *cp =
      (struct brw_program *) brw->programs[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = brw->cs.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &cp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state genX(cs_pull_constants) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM |
               BRW_NEW_CS_PROG_DATA,
   },
   .emit = genX(upload_cs_pull_constants),
};
static void
genX(upload_cs_state)(struct brw_context *brw)
{
   if (!brw->cs.base.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(
      brw, GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t), 64,
      &offset);

   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_stage_prog_data *prog_data = stage_state->prog_data;
   struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw_emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, ISL_FORMAT_RAW,
         brw->shader_time.bo->size, 1,
         RELOC_WRITE);
   }

   uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
                                    32, &stage_state->bind_bo_offset);

   /* The MEDIA_VFE_STATE documentation for Gen8+ says:
    *
    * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
    *  the only bits that are changed are scoreboard related: Scoreboard
    *  Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta.  For
    *  these scoreboard related states, a MEDIA_STATE_FLUSH is sufficient."
    *
    * Earlier generations say "MI_FLUSH" instead of "stalling PIPE_CONTROL",
    * but MI_FLUSH isn't really a thing, so we assume they meant PIPE_CONTROL.
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);

   brw_batch_emit(brw, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         uint32_t per_thread_scratch_value;

         if (GEN_GEN >= 8) {
            /* Broadwell's Per Thread Scratch Space is in the range [0, 11]
             * where 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 11;
         } else if (GEN_IS_HASWELL) {
            /* Haswell's Per Thread Scratch Space is in the range [0, 10]
             * where 0 = 2k, 1 = 4k, 2 = 8k, ..., 10 = 2M.
             */
            per_thread_scratch_value =
               ffs(stage_state->per_thread_scratch) - 12;
         } else {
            /* Earlier platforms use the range [0, 11] to mean [1kB, 12kB]
             * where 0 = 1kB, 1 = 2kB, 2 = 3kB, ..., 11 = 12kB.
             */
            per_thread_scratch_value =
               stage_state->per_thread_scratch / 1024 - 1;
         }
         vfe.ScratchSpaceBasePointer = rw_32_bo(stage_state->scratch_bo, 0);
         vfe.PerThreadScratchSpace = per_thread_scratch_value;
      }

      /* If brw->screen->subslice_total is greater than one, then
       * devinfo->max_cs_threads stores the number of threads per sub-slice;
       * we therefore need to multiply that number by the subslice count to
       * get the actual maximum number of threads.  The -1 is because the HW
       * has a bias of 1 (it would not make sense to say the maximum number
       * of threads is 0).
       */
      const uint32_t subslices = MAX2(brw->screen->subslice_total, 1);
      vfe.MaximumNumberofThreads = devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN >= 8 ? 2 : 0;
#if GEN_GEN < 11
      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GEN_GEN < 9
      vfe.BypassGatewayControl = BypassingOpenGatewayCloseGatewayprotocol;
#endif

      /* We are uploading duplicated copies of push constant uniforms for
       * each thread.  Although the local id data needs to vary per thread,
       * it won't change for other uniform data.  Unfortunately this
       * duplication is required for gen7.  As of Haswell, this duplication
       * can be avoided, but this older mechanism with duplicated data
       * continues to work.
       *
       * FINISHME: As of Haswell, we could make use of the
       * INTERFACE_DESCRIPTOR_DATA "Cross-Thread Constant Data Read Length"
       * field to only store one copy of uniform data.
       *
       * FINISHME: Broadwell adds a new alternative "Indirect Payload
       * Storage" which is described in the GPGPU_WALKER command and in the
       * Broadwell PRM Volume 7: 3D Media GPGPU, under Media GPGPU Pipeline
       * => Mode of Operations => GPGPU Mode => Indirect Payload Storage.
       *
       * Note: The constant data is built in brw_upload_cs_push_constants
       * below.
       */
      vfe.URBEntryAllocationSize = GEN_GEN >= 8 ? 2 : 0;

      const uint32_t vfe_curbe_allocation =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
   }

   if (cs_prog_data->push.total.size > 0) {
      brw_batch_emit(brw, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength =
            ALIGN(cs_prog_data->push.total.size, 64);
         curbe.CURBEDataStartAddress = stage_state->push_const_offset;
      }
   }

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);
   const struct GENX(INTERFACE_DESCRIPTOR_DATA) idd = {
      .KernelStartPointer = brw->cs.base.prog_offset,
      .SamplerStatePointer = stage_state->sampler_offset,
      .SamplerCount = GEN_GEN == 11 ? 0 :
                      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4),
      .BindingTablePointer = stage_state->bind_bo_offset,
      .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
      .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
      .SharedLocalMemorySize = encode_slm_size(GEN_GEN,
                                               prog_data->total_shared),
      .BarrierEnable = cs_prog_data->uses_barrier,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      .CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs,
#endif
   };

   GENX(INTERFACE_DESCRIPTOR_DATA_pack)(brw, desc, &idd);

   brw_batch_emit(brw, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress = offset;
   }
}

static const struct brw_tracked_state genX(cs_state) = {
   .dirty = {
      .mesa  = _NEW_PROGRAM_CONSTANTS,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_CS_PROG_DATA |
               BRW_NEW_SAMPLER_STATE_TABLE |
               BRW_NEW_SURFACES,
   },
   .emit = genX(upload_cs_state)
};
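/* Worked example of the per-thread scratch encodings above (illustrative
 * sizes): with per_thread_scratch = 2048 bytes, Broadwell programs
 * ffs(2048) - 11 = 12 - 11 = 1 (its "1 = 2k" bucket), Haswell programs
 * ffs(2048) - 12 = 0 (its "0 = 2k" bucket), and earlier platforms program
 * 2048 / 1024 - 1 = 1 on their linear [1kB, 12kB] scale.
 */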
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408

static void
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
   GLintptr indirect_offset = brw->compute.num_work_groups_offset;
   struct brw_bo *bo = brw->compute.num_work_groups_bo;

   emit_lrm(brw, GPGPU_DISPATCHDIMX, ro_bo(bo, indirect_offset + 0));
   emit_lrm(brw, GPGPU_DISPATCHDIMY, ro_bo(bo, indirect_offset + 4));
   emit_lrm(brw, GPGPU_DISPATCHDIMZ, ro_bo(bo, indirect_offset + 8));

#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(brw, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(brw, MI_PREDICATE_SRC1    , 0);
   emit_lri(brw, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(brw, MI_PREDICATE_SRC0, ro_bo(bo, indirect_offset + 0));

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   brw_batch_emit(brw, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(brw, MI_PREDICATE_SRC0, ro_bo(bo, indirect_offset + 4));

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   brw_batch_emit(brw, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(brw, MI_PREDICATE_SRC0, ro_bo(bo, indirect_offset + 8));

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   brw_batch_emit(brw, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE 1
   brw_batch_emit(brw, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif
}
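/* Net effect of the MI_PREDICATE sequence above, following its own step
 * comments: the predicate starts as (x == 0), is OR'd with (y == 0) and
 * (z == 0), and is finally inverted, so the predicated GPGPU_WALKER only
 * executes when all three indirect dispatch dimensions are non-zero.
 */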
static void
genX(emit_gpgpu_walker)(struct brw_context *brw)
{
   const struct brw_cs_prog_data *prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   const GLuint *num_groups = brw->compute.num_work_groups;

   bool indirect = brw->compute.num_work_groups_bo != NULL;
   if (indirect)
      prepare_indirect_gpgpu_walker(brw);

   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];

   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);

   brw_batch_emit(brw, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = indirect;
      ggw.PredicateEnable              = GEN_GEN <= 7 && indirect;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension      = num_groups[0];
      ggw.ThreadGroupIDYDimension      = num_groups[1];
      ggw.ThreadGroupIDZDimension      = num_groups[2];
      ggw.RightExecutionMask           = right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   brw_batch_emit(brw, GENX(MEDIA_STATE_FLUSH), msf);
}
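/* Worked example of the execution-mask math above (illustrative sizes): for
 * simd_size = 16 and a 20-invocation workgroup, right_mask starts as 0xffff,
 * right_non_aligned = 20 & 15 = 4, and right_mask >>= (16 - 4) leaves 0xf,
 * so the final (rightmost) thread of each group runs only 4 live lanes.
 */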
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_raster)(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   const bool flip_y = ctx->DrawBuffer->FlipY;

   /* _NEW_POLYGON */
   const struct gl_polygon_attrib *polygon = &ctx->Polygon;

   /* _NEW_POINT */
   const struct gl_point_attrib *point = &ctx->Point;

   brw_batch_emit(brw, GENX(3DSTATE_RASTER), raster) {
      if (brw->polygon_front_bit != flip_y)
         raster.FrontWinding = CounterClockwise;

      if (polygon->CullFlag) {
         switch (polygon->CullFaceMode) {
         case GL_FRONT:
            raster.CullMode = CULLMODE_FRONT;
            break;
         case GL_BACK:
            raster.CullMode = CULLMODE_BACK;
            break;
         case GL_FRONT_AND_BACK:
            raster.CullMode = CULLMODE_BOTH;
            break;
         default:
            unreachable("not reached");
         }
      } else {
         raster.CullMode = CULLMODE_NONE;
      }

      raster.SmoothPointEnable = point->SmoothFlag;

      raster.DXMultisampleRasterizationEnable =
         _mesa_is_multisample_enabled(ctx);

      raster.GlobalDepthOffsetEnableSolid = polygon->OffsetFill;
      raster.GlobalDepthOffsetEnableWireframe = polygon->OffsetLine;
      raster.GlobalDepthOffsetEnablePoint = polygon->OffsetPoint;

      switch (polygon->FrontMode) {
      case GL_FILL:
         raster.FrontFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.FrontFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.FrontFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      switch (polygon->BackMode) {
      case GL_FILL:
         raster.BackFaceFillMode = FILL_MODE_SOLID;
         break;
      case GL_LINE:
         raster.BackFaceFillMode = FILL_MODE_WIREFRAME;
         break;
      case GL_POINT:
         raster.BackFaceFillMode = FILL_MODE_POINT;
         break;
      default:
         unreachable("not reached");
      }

      /* _NEW_LINE */
      raster.AntialiasingEnable = ctx->Line.SmoothFlag;

#if GEN_GEN == 10
      /* _NEW_BUFFERS
       * Antialiasing Enable bit MUST not be set when NUM_MULTISAMPLES > 1.
       */
      const bool multisampled_fbo =
         _mesa_geometric_samples(ctx->DrawBuffer) > 1;
      if (multisampled_fbo)
         raster.AntialiasingEnable = false;
#endif

      /* _NEW_SCISSOR */
      raster.ScissorRectangleEnable = ctx->Scissor.EnableFlags;

      /* _NEW_TRANSFORM */
#if GEN_GEN < 9
      if (!(ctx->Transform.DepthClampNear &&
            ctx->Transform.DepthClampFar))
         raster.ViewportZClipTestEnable = true;
#endif

#if GEN_GEN >= 9
      if (!ctx->Transform.DepthClampNear)
         raster.ViewportZNearClipTestEnable = true;

      if (!ctx->Transform.DepthClampFar)
         raster.ViewportZFarClipTestEnable = true;
#endif

      /* BRW_NEW_CONSERVATIVE_RASTERIZATION */
#if GEN_GEN >= 9
      raster.ConservativeRasterizationEnable =
         ctx->IntelConservativeRasterization;
#endif

      raster.GlobalDepthOffsetClamp = polygon->OffsetClamp;
      raster.GlobalDepthOffsetScale = polygon->OffsetFactor;

      raster.GlobalDepthOffsetConstant = polygon->OffsetUnits * 2;
   }
}

static const struct brw_tracked_state genX(raster_state) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_LINE |
               _NEW_MULTISAMPLE |
               _NEW_POINT |
               _NEW_POLYGON |
               _NEW_SCISSOR |
               _NEW_TRANSFORM,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_raster),
};
#endif
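/* Note on the depth-clamp programming above: Gen9+ exposes independent near
 * and far viewport Z clip-test enables, so DepthClampNear and DepthClampFar
 * can be honored separately, while the pre-Gen9 path only has the combined
 * ViewportZClipTestEnable and therefore disables clipping only when both
 * clamps are enabled.
 */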
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_ps_extra)(struct brw_context *brw)
{
   UNUSED struct gl_context *ctx = &brw->ctx;

   const struct brw_wm_prog_data *prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   brw_batch_emit(brw, GENX(3DSTATE_PS_EXTRA), psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = prog_data->uses_kill;
      psx.AttributeEnable = prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = prog_data->persample_dispatch;

      /* _NEW_MULTISAMPLE | BRW_NEW_CONSERVATIVE_RASTERIZATION */
      if (prog_data->uses_sample_mask) {
#if GEN_GEN >= 9
         if (prog_data->post_depth_coverage)
            psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
         else if (prog_data->inner_coverage &&
                  ctx->IntelConservativeRasterization)
            psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
         else
            psx.InputCoverageMaskState = ICMS_NORMAL;
#else
         psx.PixelShaderUsesInputCoverageMask = true;
#endif
      }

      psx.oMaskPresenttoRenderTarget = prog_data->uses_omask;
#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = prog_data->computed_stencil;
#endif

      /* The stricter cross-primitive coherency guarantees that the hardware
       * gives us with the "Accesses UAV" bit set for at least one shader
       * stage and the "UAV coherency required" bit set on the 3DPRIMITIVE
       * command are redundant within the current image, atomic counter and
       * SSBO GL APIs, which all have very loose ordering and coherency
       * requirements and generally rely on the application to insert
       * explicit barriers when a shader invocation is expected to see the
       * memory writes performed by the invocations of some previous
       * primitive.  Regardless of the value of "UAV coherency required",
       * the "Accesses UAV" bits will implicitly cause a (in most cases
       * useless) DC flush when the lowermost stage with the bit set
       * finishes execution.
       *
       * It would be nice to disable it, but in some cases we can't because
       * on Gen8+ it also has an influence on rasterization via the PS
       * UAV-only signal (which could be set independently from the
       * coherency mechanism in the 3DSTATE_WM command on Gen7), and because
       * in some cases it will determine whether the hardware skips
       * execution of the fragment shader or not via the
       * ThreadDispatchEnable signal.  However if we know that
       * GEN8_PS_BLEND_HAS_WRITEABLE_RT is going to be set and
       * GEN8_PSX_PIXEL_SHADER_NO_RT_WRITE is not set it shouldn't make any
       * difference so we may just disable it here.
       *
       * Gen8 hardware tries to compute ThreadDispatchEnable for us but
       * doesn't take into account KillPixels when no depth or stencil
       * writes are enabled.  In order for occlusion queries to work
       * correctly with no attachments, we need to force-enable here.
       *
       * BRW_NEW_FS_PROG_DATA | BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS |
       * _NEW_COLOR
       */
      if ((prog_data->has_side_effects || prog_data->uses_kill) &&
          !brw_color_buffer_write_enabled(brw))
         psx.PixelShaderHasUAV = true;
   }
}

const struct brw_tracked_state genX(ps_extra) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS | _NEW_COLOR,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA |
               BRW_NEW_CONSERVATIVE_RASTERIZATION,
   },
   .emit = genX(upload_ps_extra),
};
#endif

/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(upload_ps_blend)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS */
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   const bool buffer0_is_integer = ctx->DrawBuffer->_IntegerBuffers & 0x1;

   /* _NEW_COLOR */
   struct gl_colorbuffer_attrib *color = &ctx->Color;

   brw_batch_emit(brw, GENX(3DSTATE_PS_BLEND), pb) {
      /* BRW_NEW_FRAGMENT_PROGRAM | _NEW_BUFFERS | _NEW_COLOR */
      pb.HasWriteableRT = brw_color_buffer_write_enabled(brw);

      bool alpha_to_one = false;

      if (!buffer0_is_integer) {
         /* _NEW_MULTISAMPLE */
         if (_mesa_is_multisample_enabled(ctx)) {
            pb.AlphaToCoverageEnable = ctx->Multisample.SampleAlphaToCoverage;
            alpha_to_one = ctx->Multisample.SampleAlphaToOne;
         }

         /* _NEW_COLOR */
         pb.AlphaTestEnable = color->AlphaEnabled;
      }

      /* Used for implementing the following bit of GL_EXT_texture_integer:
       * "Per-fragment operations that require floating-point color
       *  components, including multisample alpha operations, alpha test,
       *  blending, and dithering, have no effect when the corresponding
       *  colors are written to an integer color buffer."
       *
       * The OpenGL specification 3.3 (page 196), section 4.1.3 says:
       * "If drawbuffer zero is not NONE and the buffer it references has an
       *  integer format, the SAMPLE_ALPHA_TO_COVERAGE and SAMPLE_ALPHA_TO_ONE
       *  operations are skipped."
       */
      if (rb && !buffer0_is_integer && (color->BlendEnabled & 1)) {
         GLenum eqRGB = color->Blend[0].EquationRGB;
         GLenum eqA = color->Blend[0].EquationA;
         GLenum srcRGB = color->Blend[0].SrcRGB;
         GLenum dstRGB = color->Blend[0].DstRGB;
         GLenum srcA = color->Blend[0].SrcA;
         GLenum dstA = color->Blend[0].DstA;

         if (eqRGB == GL_MIN || eqRGB == GL_MAX)
            srcRGB = dstRGB = GL_ONE;

         if (eqA == GL_MIN || eqA == GL_MAX)
            srcA = dstA = GL_ONE;

         /* Due to hardware limitations, the destination may have information
          * in an alpha channel even when the format specifies no alpha
          * channel.  In order to avoid getting any incorrect blending due to
          * that alpha channel, coerce the blend factors to values that will
          * not read the alpha channel, but will instead use the correct
          * implicit value for alpha.
          */
         if (!_mesa_base_format_has_channel(rb->_BaseFormat,
                                            GL_TEXTURE_ALPHA_TYPE)) {
            srcRGB = brw_fix_xRGB_alpha(srcRGB);
            srcA = brw_fix_xRGB_alpha(srcA);
            dstRGB = brw_fix_xRGB_alpha(dstRGB);
            dstA = brw_fix_xRGB_alpha(dstA);
         }

         /* Alpha to One doesn't work with Dual Color Blending.  Override
          * SRC1_ALPHA to ONE and ONE_MINUS_SRC1_ALPHA to ZERO.
          */
         if (alpha_to_one && color->Blend[0]._UsesDualSrc) {
            srcRGB = fix_dual_blend_alpha_to_one(srcRGB);
            srcA = fix_dual_blend_alpha_to_one(srcA);
            dstRGB = fix_dual_blend_alpha_to_one(dstRGB);
            dstA = fix_dual_blend_alpha_to_one(dstA);
         }

         /* BRW_NEW_FS_PROG_DATA */
         const struct brw_wm_prog_data *wm_prog_data =
            brw_wm_prog_data(brw->wm.base.prog_data);

         /* The Dual Source Blending documentation says:
          *
          * "If SRC1 is included in a src/dst blend factor and
          *  a DualSource RT Write message is not used, results
          *  are UNDEFINED. (This reflects the same restriction in DX APIs,
          *  where undefined results are produced if “o1” is not written
          *  by a PS – there are no default values defined).
          *  If SRC1 is not included in a src/dst blend factor,
          *  dual source blending must be disabled."
          *
          * There is no way to gracefully fix this undefined situation
          * so we just disable the blending to prevent possible issues.
          */
         pb.ColorBufferBlendEnable =
            !color->Blend[0]._UsesDualSrc || wm_prog_data->dual_src_blend;
         pb.SourceAlphaBlendFactor = brw_translate_blend_factor(srcA);
         pb.DestinationAlphaBlendFactor = brw_translate_blend_factor(dstA);
         pb.SourceBlendFactor = brw_translate_blend_factor(srcRGB);
         pb.DestinationBlendFactor = brw_translate_blend_factor(dstRGB);

         pb.IndependentAlphaBlendEnable =
            srcA != srcRGB || dstA != dstRGB || eqA != eqRGB;
      }
   }
}

static const struct brw_tracked_state genX(ps_blend) = {
   .dirty = {
      .mesa  = _NEW_BUFFERS |
               _NEW_COLOR |
               _NEW_MULTISAMPLE,
      .brw   = BRW_NEW_BLORP |
               BRW_NEW_CONTEXT |
               BRW_NEW_FRAGMENT_PROGRAM |
               BRW_NEW_FS_PROG_DATA,
   },
   .emit = genX(upload_ps_blend)
};
#endif
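/* Illustrative example of the xRGB fix-up above: for a base format without
 * an alpha channel, destination alpha always reads back as the implicit 1.0,
 * so brw_fix_xRGB_alpha() coerces factors that would sample it (for instance
 * GL_DST_ALPHA) to the constant factors they are equivalent to under that
 * assumption (GL_ONE in that case), instead of letting the hardware read
 * whatever happens to sit in the unused channel.
 */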
/* ---------------------------------------------------------------------- */

#if GEN_GEN >= 8
static void
genX(emit_vf_topology)(struct brw_context *brw)
{
   brw_batch_emit(brw, GENX(3DSTATE_VF_TOPOLOGY), vftopo) {
      vftopo.PrimitiveTopologyType = brw->primitive;
   }
}

static const struct brw_tracked_state genX(vf_topology) = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_PRIMITIVE,
   },
   .emit = genX(emit_vf_topology),
};
#endif

/* ---------------------------------------------------------------------- */

static void
genX(emit_mi_report_perf_count)(struct brw_context *brw,
                                struct brw_bo *bo,
                                uint32_t offset_in_bytes,
                                uint32_t report_id)
{
   brw_batch_emit(brw, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = ggtt_bo(bo, offset_in_bytes);
      mi_rpc.ReportID = report_id;
   }
}

/* ---------------------------------------------------------------------- */

/**
 * Emit a 3DSTATE_SAMPLER_STATE_POINTERS_{VS,HS,GS,DS,PS} packet.
 */
static void
genX(emit_sampler_state_pointers_xs)(UNUSED struct brw_context *brw,
                                     UNUSED struct brw_stage_state *stage_state)
{
#if GEN_GEN >= 7
   static const uint16_t packet_headers[] = {
      [MESA_SHADER_VERTEX] = 43,
      [MESA_SHADER_TESS_CTRL] = 44,
      [MESA_SHADER_TESS_EVAL] = 45,
      [MESA_SHADER_GEOMETRY] = 46,
      [MESA_SHADER_FRAGMENT] = 47,
   };

   /* Ivybridge requires a workaround flush before VS packets. */
   if (GEN_GEN == 7 && !GEN_IS_HASWELL &&
       stage_state->stage == MESA_SHADER_VERTEX) {
      gen7_emit_vs_workaround_flush(brw);
   }

   brw_batch_emit(brw, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
      ptr._3DCommandSubOpcode = packet_headers[stage_state->stage];
      ptr.PointertoVSSamplerState = stage_state->sampler_offset;
   }
#endif
}

static bool
has_component(mesa_format format, int i)
{
   if (_mesa_is_format_color_format(format))
      return _mesa_format_has_color_component(format, i);

   /* depth and stencil have only one component */
   return i == 0;
}

/**
 * Upload SAMPLER_BORDER_COLOR_STATE.
 */
static void
genX(upload_default_color)(struct brw_context *brw,
                           const struct gl_sampler_object *sampler,
                           mesa_format format, GLenum base_format,
                           bool is_integer_format, bool is_stencil_sampling,
                           uint32_t *sdc_offset)
{
   union gl_color_union color;

   switch (base_format) {
   case GL_DEPTH_COMPONENT:
      /* GL specs that border color for depth textures is taken from the
       * R channel, while the hardware uses A.  Spam R into all the
       * channels for safety.
       */
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_ALPHA:
      color.ui[0] = 0u;
      color.ui[1] = 0u;
      color.ui[2] = 0u;
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   case GL_INTENSITY:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[0];
      break;
   case GL_LUMINANCE:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = float_as_int(1.0);
      break;
   case GL_LUMINANCE_ALPHA:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[0];
      color.ui[2] = sampler->BorderColor.ui[0];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   default:
      color.ui[0] = sampler->BorderColor.ui[0];
      color.ui[1] = sampler->BorderColor.ui[1];
      color.ui[2] = sampler->BorderColor.ui[2];
      color.ui[3] = sampler->BorderColor.ui[3];
      break;
   }

   /* In some cases we use an RGBA surface format for GL RGB textures,
    * where we've initialized the A channel to 1.0.  We also have to set
    * the border color alpha to 1.0 in that case.
    */
   if (base_format == GL_RGB)
      color.ui[3] = float_as_int(1.0);

   int alignment = 32;
   if (GEN_GEN >= 8) {
      alignment = 64;
   } else if (GEN_IS_HASWELL && (is_integer_format || is_stencil_sampling)) {
      alignment = 512;
   }

   uint32_t *sdc = brw_state_batch(
      brw, GENX(SAMPLER_BORDER_COLOR_STATE_length) * sizeof(uint32_t),
      alignment, sdc_offset);

   struct GENX(SAMPLER_BORDER_COLOR_STATE) state = { 0 };

#define ASSIGN(dst, src) \
   do {                  \
      dst = src;         \
   } while (0)

#define ASSIGNu16(dst, src) \
   do {                     \
      dst = (uint16_t)src;  \
   } while (0)

#define ASSIGNu8(dst, src) \
   do {                    \
      dst = (uint8_t)src;  \
   } while (0)

#define BORDER_COLOR_ATTR(macro, _color_type, src)            \
   macro(state.BorderColor ## _color_type ## Red, src[0]);    \
   macro(state.BorderColor ## _color_type ## Green, src[1]);  \
   macro(state.BorderColor ## _color_type ## Blue, src[2]);   \
   macro(state.BorderColor ## _color_type ## Alpha, src[3]);

#if GEN_GEN >= 8
   /* On Broadwell, the border color is represented as four 32-bit floats,
    * integers, or unsigned values, interpreted according to the surface
    * format.  This matches the sampler->BorderColor union exactly; just
    * memcpy the values.
    */
   BORDER_COLOR_ATTR(ASSIGN, 32bit, color.ui);
#elif GEN_IS_HASWELL
   if (is_integer_format || is_stencil_sampling) {
      bool stencil = format == MESA_FORMAT_S_UINT8 || is_stencil_sampling;
      const int bits_per_channel =
         _mesa_get_format_bits(format, stencil ? GL_STENCIL_BITS : GL_RED_BITS);

      /* From the Haswell PRM, "Command Reference: Structures", Page 36:
       * "If any color channel is missing from the surface format,
       *  corresponding border color should be programmed as zero and if
       *  alpha channel is missing, corresponding Alpha border color should
       *  be programmed as 1."
       */
      unsigned c[4] = { 0, 0, 0, 1 };
      for (int i = 0; i < 4; i++) {
         if (has_component(format, i))
            c[i] = color.ui[i];
      }

      switch (bits_per_channel) {
      case 8:
         /* Copy RGBA in order. */
         BORDER_COLOR_ATTR(ASSIGNu8, 8bit, c);
         break;
      case 10:
         /* R10G10B10A2_UINT is treated like a 16-bit format. */
      case 16:
         BORDER_COLOR_ATTR(ASSIGNu16, 16bit, c);
         break;
      case 32:
         if (base_format == GL_RG) {
            /* Careful inspection of the tables reveals that for RG32
             * formats, the green channel needs to go where blue normally
             * belongs.
             */
            state.BorderColor32bitRed = c[0];
            state.BorderColor32bitBlue = c[1];
            state.BorderColor32bitAlpha = 1;
         } else {
            /* Copy RGBA in order. */
            BORDER_COLOR_ATTR(ASSIGN, 32bit, c);
         }
         break;
      default:
         assert(!"Invalid number of bits per channel in integer format.");
         break;
      }
   } else {
      BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
   }
#elif GEN_GEN == 5 || GEN_GEN == 6
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_UBYTE, Unorm, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_USHORT, Unorm16, color.f);
   BORDER_COLOR_ATTR(UNCLAMPED_FLOAT_TO_SHORT, Snorm16, color.f);

#define MESA_FLOAT_TO_HALF(dst, src) \
   dst = _mesa_float_to_half(src);

   BORDER_COLOR_ATTR(MESA_FLOAT_TO_HALF, Float16, color.f);

#undef MESA_FLOAT_TO_HALF

   state.BorderColorSnorm8Red   = state.BorderColorSnorm16Red >> 8;
   state.BorderColorSnorm8Green = state.BorderColorSnorm16Green >> 8;
   state.BorderColorSnorm8Blue  = state.BorderColorSnorm16Blue >> 8;
   state.BorderColorSnorm8Alpha = state.BorderColorSnorm16Alpha >> 8;

   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#elif GEN_GEN == 4
   BORDER_COLOR_ATTR(ASSIGN, , color.f);
#else
   BORDER_COLOR_ATTR(ASSIGN, Float, color.f);
#endif

#undef ASSIGN
#undef BORDER_COLOR_ATTR

   GENX(SAMPLER_BORDER_COLOR_STATE_pack)(brw, sdc, &state);
}

static uint32_t
translate_wrap_mode(GLenum wrap, UNUSED bool using_nearest)
{
   switch (wrap) {
   case GL_REPEAT:
      return TCM_WRAP;
   case GL_CLAMP:
#if GEN_GEN >= 8
      /* GL_CLAMP is the weird mode where coordinates are clamped to
       * [0.0, 1.0], so linear filtering of coordinates outside of
       * [0.0, 1.0] gives you half edge texel value and half border
       * color.
       *
       * Gen8+ supports this natively.
       */
      return TCM_HALF_BORDER;
#else
      /* On Gen4-7.5, we clamp the coordinates in the fragment shader
       * and set clamp_border here, which gets the result desired.
       * We just use clamp(_to_edge) for nearest, because for nearest
       * clamping to 1.0 gives border color instead of the desired
       * texture edge color.
       */
      if (using_nearest)
         return TCM_CLAMP;
      else
         return TCM_CLAMP_BORDER;
#endif
   case GL_CLAMP_TO_EDGE:
      return TCM_CLAMP;
   case GL_CLAMP_TO_BORDER:
      return TCM_CLAMP_BORDER;
   case GL_MIRRORED_REPEAT:
      return TCM_MIRROR;
   case GL_MIRROR_CLAMP_TO_EDGE:
      return TCM_MIRROR_ONCE;
   default:
      return TCM_WRAP;
   }
}

/**
 * Return true if the given wrap mode requires the border color to exist.
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
#if GEN_GEN >= 8
   return wrap_mode == TCM_CLAMP_BORDER ||
          wrap_mode == TCM_HALF_BORDER;
#else
   return wrap_mode == TCM_CLAMP_BORDER;
#endif
}
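/* Example of the GL_CLAMP handling above (pre-Gen8 path): a linear-filtered
 * sampler using GL_CLAMP gets TCM_CLAMP_BORDER, and so also needs a border
 * color uploaded, while a nearest-filtered one gets plain TCM_CLAMP, since
 * nearest clamping to the edge already produces the desired texel.
 */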
/**
 * Sets the sampler state for a single unit based off of the sampler key
 * entry.
 */
static void
genX(update_sampler_state)(struct brw_context *brw,
                           GLenum target, bool tex_cube_map_seamless,
                           GLfloat tex_unit_lod_bias,
                           mesa_format format, GLenum base_format,
                           const struct gl_texture_object *texObj,
                           const struct gl_sampler_object *sampler,
                           uint32_t *sampler_state)
{
   struct GENX(SAMPLER_STATE) samp_st = { 0 };

   /* Select min and mip filters. */
   switch (sampler->MinFilter) {
   case GL_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NONE;
      break;
   case GL_NEAREST_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_LINEAR_MIPMAP_NEAREST:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_NEAREST;
      break;
   case GL_NEAREST_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_NEAREST;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   case GL_LINEAR_MIPMAP_LINEAR:
      samp_st.MinModeFilter = MAPFILTER_LINEAR;
      samp_st.MipModeFilter = MIPFILTER_LINEAR;
      break;
   default:
      unreachable("not reached");
   }

   /* Select mag filter. */
   samp_st.MagModeFilter = sampler->MagFilter == GL_LINEAR ?
      MAPFILTER_LINEAR : MAPFILTER_NEAREST;

   /* Enable anisotropic filtering if desired. */
   samp_st.MaximumAnisotropy = RATIO21;

   if (sampler->MaxAnisotropy > 1.0f) {
      if (samp_st.MinModeFilter == MAPFILTER_LINEAR)
         samp_st.MinModeFilter = MAPFILTER_ANISOTROPIC;
      if (samp_st.MagModeFilter == MAPFILTER_LINEAR)
         samp_st.MagModeFilter = MAPFILTER_ANISOTROPIC;

      if (sampler->MaxAnisotropy > 2.0f) {
         samp_st.MaximumAnisotropy =
            MIN2((sampler->MaxAnisotropy - 2) / 2, RATIO161);
      }
   }

   /* Set address rounding bits if not using nearest filtering. */
   if (samp_st.MinModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMinFilterRoundingEnable = true;
      samp_st.VAddressMinFilterRoundingEnable = true;
      samp_st.RAddressMinFilterRoundingEnable = true;
   }

   if (samp_st.MagModeFilter != MAPFILTER_NEAREST) {
      samp_st.UAddressMagFilterRoundingEnable = true;
      samp_st.VAddressMagFilterRoundingEnable = true;
      samp_st.RAddressMagFilterRoundingEnable = true;
   }

   bool either_nearest =
      sampler->MinFilter == GL_NEAREST || sampler->MagFilter == GL_NEAREST;
   unsigned wrap_s = translate_wrap_mode(sampler->WrapS, either_nearest);
   unsigned wrap_t = translate_wrap_mode(sampler->WrapT, either_nearest);
   unsigned wrap_r = translate_wrap_mode(sampler->WrapR, either_nearest);

   if (target == GL_TEXTURE_CUBE_MAP ||
       target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      /* Cube maps must use the same wrap mode for all three coordinate
       * dimensions.  Prior to Haswell, only CUBE and CLAMP are valid.
       *
       * Ivybridge and Baytrail seem to have problems with CUBE mode and
       * integer formats.  Fall back to CLAMP for now.
       */
      if ((tex_cube_map_seamless || sampler->CubeMapSeamless) &&
          !(GEN_GEN == 7 && !GEN_IS_HASWELL && texObj->_IsIntegerFormat)) {
         wrap_s = TCM_CUBE;
         wrap_t = TCM_CUBE;
         wrap_r = TCM_CUBE;
      } else {
         wrap_s = TCM_CLAMP;
         wrap_t = TCM_CLAMP;
         wrap_r = TCM_CLAMP;
      }
   } else if (target == GL_TEXTURE_1D) {
      /* There's a bug in 1D texture sampling - it actually pays
       * attention to the wrap_t value, though it should not.
       * Override the wrap_t value here to GL_REPEAT to keep
       * any nonexistent border pixels from floating in.
       */
      wrap_t = TCM_WRAP;
   }

   samp_st.TCXAddressControlMode = wrap_s;
   samp_st.TCYAddressControlMode = wrap_t;
   samp_st.TCZAddressControlMode = wrap_r;

   /* Set shadow function. */
   samp_st.ShadowFunction =
      sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB ?
      intel_translate_shadow_compare_func(sampler->CompareFunc) : 0;

#if GEN_GEN >= 7
   /* Select the anisotropic filtering algorithm. */
   samp_st.AnisotropicAlgorithm =
      samp_st.MinModeFilter == MAPFILTER_ANISOTROPIC ?
      EWAApproximation : LEGACY;
#endif

#if GEN_GEN >= 6
   samp_st.NonnormalizedCoordinateEnable = target == GL_TEXTURE_RECTANGLE;
#endif

   const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
   samp_st.MinLOD = CLAMP(sampler->MinLod, 0, hw_max_lod);
   samp_st.MaxLOD = CLAMP(sampler->MaxLod, 0, hw_max_lod);
   samp_st.TextureLODBias =
      CLAMP(tex_unit_lod_bias + sampler->LodBias, -16, 15);

#if GEN_GEN == 6
   samp_st.BaseMipLevel =
      CLAMP(texObj->MinLevel + texObj->BaseLevel, 0, hw_max_lod);
   samp_st.MinandMagStateNotEqual =
      samp_st.MinModeFilter != samp_st.MagModeFilter;
#endif

   /* Upload the border color if necessary.  If not, just point it at
    * offset 0 (the start of the batch) - the color should be ignored,
    * but that address won't fault in case something reads it anyway.
    */
   uint32_t border_color_offset = 0;
   if (wrap_mode_needs_border_color(wrap_s) ||
       wrap_mode_needs_border_color(wrap_t) ||
       wrap_mode_needs_border_color(wrap_r)) {
      genX(upload_default_color)(brw, sampler, format, base_format,
                                 texObj->_IsIntegerFormat,
                                 texObj->StencilSampling,
                                 &border_color_offset);
   }
#if GEN_GEN < 6
   samp_st.BorderColorPointer =
      ro_bo(brw->batch.state.bo, border_color_offset);
#else
   samp_st.BorderColorPointer = border_color_offset;
#endif

#if GEN_GEN >= 8
   samp_st.LODPreClampMode = CLAMP_MODE_OGL;
#else
   samp_st.LODPreClampEnable = true;
#endif

   GENX(SAMPLER_STATE_pack)(brw, sampler_state, &samp_st);
}
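/* Illustrative numbers for the anisotropy encoding above, assuming the
 * RATIO* enums advance one step per additional 2x of anisotropy (RATIO21 for
 * 2:1 up through RATIO161 for 16:1, which is what the MIN2() cap implies):
 * a GL max anisotropy of 16.0 maps to (16 - 2) / 2 = 7, i.e. RATIO161.
 */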
static void
update_sampler_state(struct brw_context *brw,
                     int unit,
                     uint32_t *sampler_state)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   const struct gl_texture_object *texObj = texUnit->_Current;
   const struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);

   /* These don't use samplers at all. */
   if (texObj->Target == GL_TEXTURE_BUFFER)
      return;

   struct gl_texture_image *firstImage = texObj->Image[0][texObj->BaseLevel];
   genX(update_sampler_state)(brw, texObj->Target,
                              ctx->Texture.CubeMapSeamless,
                              texUnit->LodBias,
                              firstImage->TexFormat, firstImage->_BaseFormat,
                              texObj, sampler,
                              sampler_state);
}

static void
genX(upload_sampler_state_table)(struct brw_context *brw,
                                 struct gl_program *prog,
                                 struct brw_stage_state *stage_state)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t sampler_count = stage_state->sampler_count;

   GLbitfield SamplersUsed = prog->SamplersUsed;

   if (sampler_count == 0)
      return;

   /* SAMPLER_STATE is 4 DWords on all platforms. */
   const int dwords = GENX(SAMPLER_STATE_length);
   const int size_in_bytes = dwords * sizeof(uint32_t);

   uint32_t *sampler_state = brw_state_batch(brw,
                                             sampler_count * size_in_bytes,
                                             32, &stage_state->sampler_offset);
   /* memset(sampler_state, 0, sampler_count * size_in_bytes); */

   for (unsigned s = 0; s < sampler_count; s++) {
      if (SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         if (ctx->Texture.Unit[unit]._Current) {
            update_sampler_state(brw, unit, sampler_state);
         }
      }

      sampler_state += dwords;
   }

   if (GEN_GEN >= 7 && stage_state->stage != MESA_SHADER_COMPUTE) {
      /* Emit a 3DSTATE_SAMPLER_STATE_POINTERS_XS packet. */
      genX(emit_sampler_state_pointers_xs)(brw, stage_state);
   } else {
      /* Flag that the sampler state table pointer has changed; later atoms
       * will handle it.
       */
      brw->ctx.NewDriverState |= BRW_NEW_SAMPLER_STATE_TABLE;
   }
}
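/* Size arithmetic for the table above (illustrative): SAMPLER_STATE is 4
 * DWords (16 bytes) on every generation this file targets, so a stage using
 * three samplers allocates 3 * 16 = 48 bytes at 32-byte alignment, and
 * sampler_state advances by dwords per slot whether or not that slot is
 * actually populated.
 */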
static void
genX(upload_fs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
   genX(upload_sampler_state_table)(brw, fs, &brw->wm.base);
}

static const struct brw_tracked_state genX(fs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_FRAGMENT_PROGRAM,
   },
   .emit = genX(upload_fs_samplers),
};

static void
genX(upload_vs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
   genX(upload_sampler_state_table)(brw, vs, &brw->vs.base);
}

static const struct brw_tracked_state genX(vs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_VERTEX_PROGRAM,
   },
   .emit = genX(upload_vs_samplers),
};

#if GEN_GEN >= 6
static void
genX(upload_gs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
   if (!gs)
      return;

   genX(upload_sampler_state_table)(brw, gs, &brw->gs.base);
}

static const struct brw_tracked_state genX(gs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_GEOMETRY_PROGRAM,
   },
   .emit = genX(upload_gs_samplers),
};
#endif

#if GEN_GEN >= 7
static void
genX(upload_tcs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   if (!tcs)
      return;

   genX(upload_sampler_state_table)(brw, tcs, &brw->tcs.base);
}

static const struct brw_tracked_state genX(tcs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tcs_samplers),
};
#endif

#if GEN_GEN >= 7
static void
genX(upload_tes_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
   if (!tes)
      return;

   genX(upload_sampler_state_table)(brw, tes, &brw->tes.base);
}

static const struct brw_tracked_state genX(tes_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_TESS_PROGRAMS,
   },
   .emit = genX(upload_tes_samplers),
};
#endif

#if GEN_GEN >= 7
static void
genX(upload_cs_samplers)(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
   if (!cs)
      return;

   genX(upload_sampler_state_table)(brw, cs, &brw->cs.base);
}

const struct brw_tracked_state genX(cs_samplers) = {
   .dirty = {
      .mesa  = _NEW_TEXTURE,
      .brw   = BRW_NEW_BATCH |
               BRW_NEW_BLORP |
               BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = genX(upload_cs_samplers),
};
#endif
/* ---------------------------------------------------------------------- */

#if GEN_GEN <= 5

static void genX(upload_blend_constant_color)(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   brw_batch_emit(brw, GENX(3DSTATE_CONSTANT_COLOR), blend_cc) {
      blend_cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
      blend_cc.BlendConstantColorGreen = ctx->Color.BlendColorUnclamped[1];
      blend_cc.BlendConstantColorBlue = ctx->Color.BlendColorUnclamped[2];
      blend_cc.BlendConstantColorAlpha = ctx->Color.BlendColorUnclamped[3];
   }
}

static const struct brw_tracked_state genX(blend_constant_color) = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_BLORP,
   },
   .emit = genX(upload_blend_constant_color)
};
#endif
/* ---------------------------------------------------------------------- */

void
genX(init_atoms)(struct brw_context *brw)
{
#if GEN_GEN < 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(vf_statistics),

      /* Once all the programs are done, we know how large urb entry
       * sizes need to be and can decide if we need to change the urb
       * layout.
       */
      &brw_curbe_offsets,
      &brw_recalculate_urb_fence,

      &genX(cc_vp),
      &genX(color_calc_state),

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_wm_pull_constants,
      &brw_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &brw_vs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),

      /* These set up state for brw_psp_urb_cbs */
      &genX(wm_state),
      &genX(sf_clip_viewport),
      &genX(sf_state),
      &genX(vs_state), /* always required, enabled or not */
      &genX(clip_state),
      &genX(gs_state),

      /* Command packets:
       */
      &brw_binding_table_pointers,
      &genX(blend_constant_color),

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &brw_psp_urb_cbs,

      &genX(drawing_rect),
      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &brw_constant_buffer
   };
#elif GEN_GEN == 6
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(vf_statistics),

      &genX(sf_clip_viewport),

      /* Command packets: */

      /* must do before binding table pointers, cc state ptrs */
      &brw_state_base_address,

      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_state */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,
      &gen6_sol_surface,
      &brw_vs_binding_table,
      &gen6_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(gs_samplers),
      &gen6_sampler_state,
      &genX(multisample_state),

      &genX(vs_state),
      &genX(gs_state),
      &genX(clip_state),
      &genX(sf_state),
      &genX(wm_state),

      &genX(scissor_state),

      &gen6_binding_table_pointers,

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),
   };
#elif GEN_GEN == 7
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(vf_statistics),

      /* Command packets: */

      &genX(cc_vp),
      &genX(sf_clip_viewport),

      &gen7_l3_state,
      &gen7_push_constant_space,
      &gen7_urb,
      &genX(blend_state),         /* must do before cc unit */
      &genX(color_calc_state),    /* must do before cc unit */
      &genX(depth_stencil_state), /* must do before cc unit */

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(tcs_samplers),
      &genX(tes_samplers),
      &genX(gs_samplers),
      &genX(multisample_state),

      &genX(vs_state),
      &genX(hs_state),
      &genX(te_state),
      &genX(ds_state),
      &genX(gs_state),
      &genX(sol_state),
      &genX(clip_state),
      &genX(sbe_state),
      &genX(sf_state),
      &genX(wm_state),
      &genX(ps_state),

      &genX(scissor_state),

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &haswell_cut_index,
   };
#elif GEN_GEN >= 8
   static const struct brw_tracked_state *render_atoms[] =
   {
      &genX(vf_statistics),

      &genX(cc_vp),
      &genX(sf_clip_viewport),

      &gen7_l3_state,
      &gen7_push_constant_space,
      &gen7_urb,
      &genX(blend_state),
      &genX(color_calc_state),

      &brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
      &brw_tcs_image_surfaces, /* Before tcs push/pull constants and binding table */
      &brw_tes_image_surfaces, /* Before tes push/pull constants and binding table */
      &brw_gs_image_surfaces, /* Before gs push/pull constants and binding table */
      &brw_wm_image_surfaces, /* Before wm push/pull constants and binding table */

      &genX(vs_push_constants), /* Before vs_state */
      &genX(tcs_push_constants),
      &genX(tes_push_constants),
      &genX(gs_push_constants), /* Before gs_state */
      &genX(wm_push_constants), /* Before wm_surfaces and constant_buffer */

      /* Surface state setup.  Must come before the VS/WM unit.  The binding
       * table upload must be last.
       */
      &brw_vs_pull_constants,
      &brw_vs_ubo_surfaces,
      &brw_tcs_pull_constants,
      &brw_tcs_ubo_surfaces,
      &brw_tes_pull_constants,
      &brw_tes_ubo_surfaces,
      &brw_gs_pull_constants,
      &brw_gs_ubo_surfaces,
      &brw_wm_pull_constants,
      &brw_wm_ubo_surfaces,
      &gen6_renderbuffer_surfaces,
      &brw_renderbuffer_read_surfaces,
      &brw_texture_surfaces,

      &genX(push_constant_packets),

      &brw_vs_binding_table,
      &brw_tcs_binding_table,
      &brw_tes_binding_table,
      &brw_gs_binding_table,
      &brw_wm_binding_table,

      &genX(fs_samplers),
      &genX(vs_samplers),
      &genX(tcs_samplers),
      &genX(tes_samplers),
      &genX(gs_samplers),
      &genX(multisample_state),

      &genX(vs_state),
      &genX(hs_state),
      &genX(te_state),
      &genX(ds_state),
      &genX(gs_state),
      &genX(sol_state),
      &genX(clip_state),
      &genX(raster_state),
      &genX(sbe_state),
      &genX(sf_state),
      &genX(ps_blend),
      &genX(ps_extra),
      &genX(ps_state),
      &genX(depth_stencil_state),
      &genX(wm_state),

      &genX(scissor_state),

      &brw_depthbuffer,

      &genX(polygon_stipple),
      &genX(polygon_stipple_offset),

      &genX(line_stipple),

      &genX(drawing_rect),

      &genX(vf_topology),

      &brw_indices, /* must come before brw_vertices */
      &genX(index_buffer),
      &genX(vertices),

      &genX(cut_index),
      &gen8_pma_fix,
   };
#endif

   STATIC_ASSERT(ARRAY_SIZE(render_atoms) <= ARRAY_SIZE(brw->render_atoms));
   brw_copy_pipeline_atoms(brw, BRW_RENDER_PIPELINE,
                           render_atoms, ARRAY_SIZE(render_atoms));

#if GEN_GEN >= 7
   static const struct brw_tracked_state *compute_atoms[] =
   {
      &gen7_l3_state,
      &brw_cs_image_surfaces,
      &genX(cs_push_constants),
      &genX(cs_pull_constants),
      &brw_cs_ubo_surfaces,
      &brw_cs_texture_surfaces,
      &brw_cs_work_groups_surface,
      &genX(cs_samplers),
      &genX(cs_state),
   };

   STATIC_ASSERT(ARRAY_SIZE(compute_atoms) <= ARRAY_SIZE(brw->compute_atoms));
   brw_copy_pipeline_atoms(brw, BRW_COMPUTE_PIPELINE,
                           compute_atoms, ARRAY_SIZE(compute_atoms));

   brw->vtbl.emit_mi_report_perf_count = genX(emit_mi_report_perf_count);
   brw->vtbl.emit_compute_walker = genX(emit_gpgpu_walker);
#endif
}