/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
39 is_passthru_format(uint32_t format
)
42 case BRW_SURFACEFORMAT_R64_PASSTHRU
:
43 case BRW_SURFACEFORMAT_R64G64_PASSTHRU
:
44 case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU
:
45 case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU
:
54 gen8_emit_vertices(struct brw_context
*brw
)
56 struct gl_context
*ctx
= &brw
->ctx
;
59 brw_prepare_vertices(brw
);
60 brw_prepare_shader_draw_parameters(brw
);
62 uses_edge_flag
= (ctx
->Polygon
.FrontMode
!= GL_FILL
||
63 ctx
->Polygon
.BackMode
!= GL_FILL
);
65 const struct brw_vs_prog_data
*vs_prog_data
=
66 brw_vs_prog_data(brw
->vs
.base
.prog_data
);
68 if (vs_prog_data
->uses_vertexid
|| vs_prog_data
->uses_instanceid
) {
69 unsigned vue
= brw
->vb
.nr_enabled
;
71 /* The element for the edge flags must always be last, so we have to
72 * insert the SGVS before it in that case.
80 "Trying to insert VID/IID past 33rd vertex element, "
81 "need to reorder the vertex attrbutes.");
84 if (vs_prog_data
->uses_vertexid
) {
85 dw1
|= GEN8_SGVS_ENABLE_VERTEX_ID
|
86 (2 << GEN8_SGVS_VERTEX_ID_COMPONENT_SHIFT
) | /* .z channel */
87 (vue
<< GEN8_SGVS_VERTEX_ID_ELEMENT_OFFSET_SHIFT
);
90 if (vs_prog_data
->uses_instanceid
) {
91 dw1
|= GEN8_SGVS_ENABLE_INSTANCE_ID
|
92 (3 << GEN8_SGVS_INSTANCE_ID_COMPONENT_SHIFT
) | /* .w channel */
93 (vue
<< GEN8_SGVS_INSTANCE_ID_ELEMENT_OFFSET_SHIFT
);
97 OUT_BATCH(_3DSTATE_VF_SGVS
<< 16 | (2 - 2));
102 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
103 OUT_BATCH(vue
| GEN8_VF_INSTANCING_ENABLE
);
108 OUT_BATCH(_3DSTATE_VF_SGVS
<< 16 | (2 - 2));
113 /* If the VS doesn't read any inputs (calculating vertex position from
114 * a state variable for some reason, for example), emit a single pad
115 * VERTEX_ELEMENT struct and bail.
117 * The stale VB state stays in place, but they don't do anything unless
118 * a VE loads from them.
120 if (brw
->vb
.nr_enabled
== 0) {
122 OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS
<< 16) | (3 - 2));
123 OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT
) |
125 (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
<< BRW_VE0_FORMAT_SHIFT
) |
126 (0 << BRW_VE0_SRC_OFFSET_SHIFT
));
127 OUT_BATCH((BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
128 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
129 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
130 (BRW_VE1_COMPONENT_STORE_1_FLT
<< BRW_VE1_COMPONENT_3_SHIFT
));
135 /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
136 const bool uses_draw_params
=
137 vs_prog_data
->uses_basevertex
||
138 vs_prog_data
->uses_baseinstance
;
139 const unsigned nr_buffers
= brw
->vb
.nr_buffers
+
140 uses_draw_params
+ vs_prog_data
->uses_drawid
;
143 assert(nr_buffers
<= 33);
145 BEGIN_BATCH(1 + 4 * nr_buffers
);
146 OUT_BATCH((_3DSTATE_VERTEX_BUFFERS
<< 16) | (4 * nr_buffers
- 1));
147 for (unsigned i
= 0; i
< brw
->vb
.nr_buffers
; i
++) {
148 const struct brw_vertex_buffer
*buffer
= &brw
->vb
.buffers
[i
];
149 EMIT_VERTEX_BUFFER_STATE(brw
, i
, buffer
->bo
,
151 buffer
->offset
+ buffer
->size
,
152 buffer
->stride
, 0 /* unused */);
155 if (uses_draw_params
) {
156 EMIT_VERTEX_BUFFER_STATE(brw
, brw
->vb
.nr_buffers
,
157 brw
->draw
.draw_params_bo
,
158 brw
->draw
.draw_params_offset
,
159 brw
->draw
.draw_params_bo
->size
,
164 if (vs_prog_data
->uses_drawid
) {
165 EMIT_VERTEX_BUFFER_STATE(brw
, brw
->vb
.nr_buffers
+ 1,
166 brw
->draw
.draw_id_bo
,
167 brw
->draw
.draw_id_offset
,
168 brw
->draw
.draw_id_bo
->size
,
175 /* Normally we don't need an element for the SGVS attribute because the
176 * 3DSTATE_VF_SGVS instruction lets you store the generated attribute in an
177 * element that is past the list in 3DSTATE_VERTEX_ELEMENTS. However if
178 * we're using draw parameters then we need an element for the those
179 * values. Additionally if there is an edge flag element then the SGVS
180 * can't be inserted past that so we need a dummy element to ensure that
181 * the edge flag is the last one.
183 const bool needs_sgvs_element
= (vs_prog_data
->uses_basevertex
||
184 vs_prog_data
->uses_baseinstance
||
185 ((vs_prog_data
->uses_instanceid
||
186 vs_prog_data
->uses_vertexid
) &&
188 const unsigned nr_elements
=
189 brw
->vb
.nr_enabled
+ needs_sgvs_element
+ vs_prog_data
->uses_drawid
;
191 /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
192 * presumably for VertexID/InstanceID.
194 assert(nr_elements
<= 34);
196 struct brw_vertex_element
*gen6_edgeflag_input
= NULL
;
198 BEGIN_BATCH(1 + nr_elements
* 2);
199 OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS
<< 16) | (2 * nr_elements
- 1));
200 for (unsigned i
= 0; i
< brw
->vb
.nr_enabled
; i
++) {
201 struct brw_vertex_element
*input
= brw
->vb
.enabled
[i
];
202 uint32_t format
= brw_get_vertex_surface_type(brw
, input
->glarray
);
203 uint32_t comp0
= BRW_VE1_COMPONENT_STORE_SRC
;
204 uint32_t comp1
= BRW_VE1_COMPONENT_STORE_SRC
;
205 uint32_t comp2
= BRW_VE1_COMPONENT_STORE_SRC
;
206 uint32_t comp3
= BRW_VE1_COMPONENT_STORE_SRC
;
208 /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
209 * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
210 * element which has edge flag enabled."
212 assert(!(is_passthru_format(format
) && uses_edge_flag
));
214 /* The gen4 driver expects edgeflag to come in as a float, and passes
215 * that float on to the tests in the clipper. Mesa's current vertex
216 * attribute value for EdgeFlag is stored as a float, which works out.
217 * glEdgeFlagPointer, on the other hand, gives us an unnormalized
218 * integer ubyte. Just rewrite that to convert to a float.
220 if (input
== &brw
->vb
.inputs
[VERT_ATTRIB_EDGEFLAG
]) {
221 /* Gen6+ passes edgeflag as sideband along with the vertex, instead
222 * of in the VUE. We have to upload it sideband as the last vertex
223 * element according to the B-Spec.
225 gen6_edgeflag_input
= input
;
229 switch (input
->glarray
->Size
) {
230 case 0: comp0
= BRW_VE1_COMPONENT_STORE_0
;
231 case 1: comp1
= BRW_VE1_COMPONENT_STORE_0
;
232 case 2: comp2
= BRW_VE1_COMPONENT_STORE_0
;
233 case 3: comp3
= input
->glarray
->Integer
? BRW_VE1_COMPONENT_STORE_1_INT
234 : BRW_VE1_COMPONENT_STORE_1_FLT
;
238 /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
240 * "When SourceElementFormat is set to one of the *64*_PASSTHRU
241 * formats, 64-bit components are stored in the URB without any
242 * conversion. In this case, vertex elements must be written as 128
243 * or 256 bits, with VFCOMP_STORE_0 being used to pad the output
244 * as required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
245 * component into the URB, Component 1 must be specified as
246 * VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE)
247 * in order to output a 128-bit vertex element, or Components 1-3 must
248 * be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
249 * element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
250 * to be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
253 if (input
->glarray
->Doubles
) {
254 switch (input
->glarray
->Size
) {
258 /* Use 128-bits instead of 256-bits to write double and dvec2
261 comp2
= BRW_VE1_COMPONENT_NOSTORE
;
262 comp3
= BRW_VE1_COMPONENT_NOSTORE
;
265 /* Pad the output using VFCOMP_STORE_0 as suggested
268 comp3
= BRW_VE1_COMPONENT_STORE_0
;
273 OUT_BATCH((input
->buffer
<< GEN6_VE0_INDEX_SHIFT
) |
275 (format
<< BRW_VE0_FORMAT_SHIFT
) |
276 (input
->offset
<< BRW_VE0_SRC_OFFSET_SHIFT
));
278 OUT_BATCH((comp0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
279 (comp1
<< BRW_VE1_COMPONENT_1_SHIFT
) |
280 (comp2
<< BRW_VE1_COMPONENT_2_SHIFT
) |
281 (comp3
<< BRW_VE1_COMPONENT_3_SHIFT
));
284 if (needs_sgvs_element
) {
285 if (vs_prog_data
->uses_basevertex
||
286 vs_prog_data
->uses_baseinstance
) {
287 OUT_BATCH(GEN6_VE0_VALID
|
288 brw
->vb
.nr_buffers
<< GEN6_VE0_INDEX_SHIFT
|
289 BRW_SURFACEFORMAT_R32G32_UINT
<< BRW_VE0_FORMAT_SHIFT
);
290 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
291 (BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_1_SHIFT
) |
292 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
293 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
295 OUT_BATCH(GEN6_VE0_VALID
);
296 OUT_BATCH((BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
297 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
298 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
299 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
303 if (vs_prog_data
->uses_drawid
) {
304 OUT_BATCH(GEN6_VE0_VALID
|
305 ((brw
->vb
.nr_buffers
+ 1) << GEN6_VE0_INDEX_SHIFT
) |
306 (BRW_SURFACEFORMAT_R32_UINT
<< BRW_VE0_FORMAT_SHIFT
));
307 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
308 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
309 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
310 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
313 if (gen6_edgeflag_input
) {
315 brw_get_vertex_surface_type(brw
, gen6_edgeflag_input
->glarray
);
317 OUT_BATCH((gen6_edgeflag_input
->buffer
<< GEN6_VE0_INDEX_SHIFT
) |
319 GEN6_VE0_EDGE_FLAG_ENABLE
|
320 (format
<< BRW_VE0_FORMAT_SHIFT
) |
321 (gen6_edgeflag_input
->offset
<< BRW_VE0_SRC_OFFSET_SHIFT
));
322 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
323 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
324 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
325 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
329 for (unsigned i
= 0, j
= 0; i
< brw
->vb
.nr_enabled
; i
++) {
330 const struct brw_vertex_element
*input
= brw
->vb
.enabled
[i
];
331 const struct brw_vertex_buffer
*buffer
= &brw
->vb
.buffers
[input
->buffer
];
332 unsigned element_index
;
334 /* The edge flag element is reordered to be the last one in the code
335 * above so we need to compensate for that in the element indices used
338 if (input
== gen6_edgeflag_input
)
339 element_index
= nr_elements
- 1;
344 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
345 OUT_BATCH(element_index
|
346 (buffer
->step_rate
? GEN8_VF_INSTANCING_ENABLE
: 0));
347 OUT_BATCH(buffer
->step_rate
);
351 if (vs_prog_data
->uses_drawid
) {
352 const unsigned element
= brw
->vb
.nr_enabled
+ needs_sgvs_element
;
354 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
361 const struct brw_tracked_state gen8_vertices
= {
363 .mesa
= _NEW_POLYGON
,
364 .brw
= BRW_NEW_BATCH
|
367 BRW_NEW_VS_PROG_DATA
,
369 .emit
= gen8_emit_vertices
,
373 gen8_emit_index_buffer(struct brw_context
*brw
)
375 const struct _mesa_index_buffer
*index_buffer
= brw
->ib
.ib
;
376 uint32_t mocs_wb
= brw
->gen
>= 9 ? SKL_MOCS_WB
: BDW_MOCS_WB
;
378 if (index_buffer
== NULL
)
382 OUT_BATCH(CMD_INDEX_BUFFER
<< 16 | (5 - 2));
383 OUT_BATCH(brw_get_index_type(index_buffer
->type
) | mocs_wb
);
384 OUT_RELOC64(brw
->ib
.bo
, I915_GEM_DOMAIN_VERTEX
, 0, 0);
385 OUT_BATCH(brw
->ib
.size
);
389 const struct brw_tracked_state gen8_index_buffer
= {
392 .brw
= BRW_NEW_BATCH
|
394 BRW_NEW_INDEX_BUFFER
,
396 .emit
= gen8_emit_index_buffer
,
400 gen8_emit_vf_topology(struct brw_context
*brw
)
403 OUT_BATCH(_3DSTATE_VF_TOPOLOGY
<< 16 | (2 - 2));
404 OUT_BATCH(brw
->primitive
);
408 const struct brw_tracked_state gen8_vf_topology
= {
411 .brw
= BRW_NEW_BLORP
|
414 .emit
= gen8_emit_vf_topology
,