/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "main/bufferobj.h"
25 #include "main/context.h"
26 #include "main/enums.h"
27 #include "main/macros.h"
30 #include "brw_defines.h"
31 #include "brw_context.h"
32 #include "brw_state.h"
34 #include "intel_batchbuffer.h"
35 #include "intel_buffer_objects.h"
38 is_passthru_format(uint32_t format
)
41 case BRW_SURFACEFORMAT_R64_PASSTHRU
:
42 case BRW_SURFACEFORMAT_R64G64_PASSTHRU
:
43 case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU
:
44 case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU
:
52 gen8_emit_vertices(struct brw_context
*brw
)
54 struct gl_context
*ctx
= &brw
->ctx
;
57 brw_prepare_vertices(brw
);
58 brw_prepare_shader_draw_parameters(brw
);
60 uses_edge_flag
= (ctx
->Polygon
.FrontMode
!= GL_FILL
||
61 ctx
->Polygon
.BackMode
!= GL_FILL
);
63 if (brw
->vs
.prog_data
->uses_vertexid
|| brw
->vs
.prog_data
->uses_instanceid
) {
64 unsigned vue
= brw
->vb
.nr_enabled
;
66 /* The element for the edge flags must always be last, so we have to
67 * insert the SGVS before it in that case.
75 "Trying to insert VID/IID past 33rd vertex element, "
76 "need to reorder the vertex attrbutes.");
79 if (brw
->vs
.prog_data
->uses_vertexid
) {
80 dw1
|= GEN8_SGVS_ENABLE_VERTEX_ID
|
81 (2 << GEN8_SGVS_VERTEX_ID_COMPONENT_SHIFT
) | /* .z channel */
82 (vue
<< GEN8_SGVS_VERTEX_ID_ELEMENT_OFFSET_SHIFT
);
85 if (brw
->vs
.prog_data
->uses_instanceid
) {
86 dw1
|= GEN8_SGVS_ENABLE_INSTANCE_ID
|
87 (3 << GEN8_SGVS_INSTANCE_ID_COMPONENT_SHIFT
) | /* .w channel */
88 (vue
<< GEN8_SGVS_INSTANCE_ID_ELEMENT_OFFSET_SHIFT
);
92 OUT_BATCH(_3DSTATE_VF_SGVS
<< 16 | (2 - 2));
97 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
98 OUT_BATCH(vue
| GEN8_VF_INSTANCING_ENABLE
);
103 OUT_BATCH(_3DSTATE_VF_SGVS
<< 16 | (2 - 2));
108 /* If the VS doesn't read any inputs (calculating vertex position from
109 * a state variable for some reason, for example), emit a single pad
110 * VERTEX_ELEMENT struct and bail.
112 * The stale VB state stays in place, but they don't do anything unless
113 * a VE loads from them.
115 if (brw
->vb
.nr_enabled
== 0) {
117 OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS
<< 16) | (3 - 2));
118 OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT
) |
120 (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
<< BRW_VE0_FORMAT_SHIFT
) |
121 (0 << BRW_VE0_SRC_OFFSET_SHIFT
));
122 OUT_BATCH((BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
123 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
124 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
125 (BRW_VE1_COMPONENT_STORE_1_FLT
<< BRW_VE1_COMPONENT_3_SHIFT
));
130 /* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
131 const bool uses_draw_params
=
132 brw
->vs
.prog_data
->uses_basevertex
||
133 brw
->vs
.prog_data
->uses_baseinstance
;
134 const unsigned nr_buffers
= brw
->vb
.nr_buffers
+
135 uses_draw_params
+ brw
->vs
.prog_data
->uses_drawid
;
138 assert(nr_buffers
<= 33);
140 BEGIN_BATCH(1 + 4 * nr_buffers
);
141 OUT_BATCH((_3DSTATE_VERTEX_BUFFERS
<< 16) | (4 * nr_buffers
- 1));
142 for (unsigned i
= 0; i
< brw
->vb
.nr_buffers
; i
++) {
143 const struct brw_vertex_buffer
*buffer
= &brw
->vb
.buffers
[i
];
144 EMIT_VERTEX_BUFFER_STATE(brw
, i
, buffer
->bo
,
146 buffer
->offset
+ buffer
->size
,
147 buffer
->stride
, 0 /* unused */);
150 if (uses_draw_params
) {
151 EMIT_VERTEX_BUFFER_STATE(brw
, brw
->vb
.nr_buffers
,
152 brw
->draw
.draw_params_bo
,
153 brw
->draw
.draw_params_offset
,
154 brw
->draw
.draw_params_bo
->size
,
159 if (brw
->vs
.prog_data
->uses_drawid
) {
160 EMIT_VERTEX_BUFFER_STATE(brw
, brw
->vb
.nr_buffers
+ 1,
161 brw
->draw
.draw_id_bo
,
162 brw
->draw
.draw_id_offset
,
163 brw
->draw
.draw_id_bo
->size
,
170 /* Normally we don't need an element for the SGVS attribute because the
171 * 3DSTATE_VF_SGVS instruction lets you store the generated attribute in an
172 * element that is past the list in 3DSTATE_VERTEX_ELEMENTS. However if
173 * we're using draw parameters then we need an element for the those
174 * values. Additionally if there is an edge flag element then the SGVS
175 * can't be inserted past that so we need a dummy element to ensure that
176 * the edge flag is the last one.
178 const bool needs_sgvs_element
= (brw
->vs
.prog_data
->uses_basevertex
||
179 brw
->vs
.prog_data
->uses_baseinstance
||
180 ((brw
->vs
.prog_data
->uses_instanceid
||
181 brw
->vs
.prog_data
->uses_vertexid
) &&
183 const unsigned nr_elements
=
184 brw
->vb
.nr_enabled
+ needs_sgvs_element
+ brw
->vs
.prog_data
->uses_drawid
;
186 /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
187 * presumably for VertexID/InstanceID.
189 assert(nr_elements
<= 34);
191 struct brw_vertex_element
*gen6_edgeflag_input
= NULL
;
193 BEGIN_BATCH(1 + nr_elements
* 2);
194 OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS
<< 16) | (2 * nr_elements
- 1));
195 for (unsigned i
= 0; i
< brw
->vb
.nr_enabled
; i
++) {
196 struct brw_vertex_element
*input
= brw
->vb
.enabled
[i
];
197 uint32_t format
= brw_get_vertex_surface_type(brw
, input
->glarray
);
198 uint32_t comp0
= BRW_VE1_COMPONENT_STORE_SRC
;
199 uint32_t comp1
= BRW_VE1_COMPONENT_STORE_SRC
;
200 uint32_t comp2
= BRW_VE1_COMPONENT_STORE_SRC
;
201 uint32_t comp3
= BRW_VE1_COMPONENT_STORE_SRC
;
203 /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
204 * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
205 * element which has edge flag enabled."
207 assert(!(is_passthru_format(format
) && uses_edge_flag
));
209 /* The gen4 driver expects edgeflag to come in as a float, and passes
210 * that float on to the tests in the clipper. Mesa's current vertex
211 * attribute value for EdgeFlag is stored as a float, which works out.
212 * glEdgeFlagPointer, on the other hand, gives us an unnormalized
213 * integer ubyte. Just rewrite that to convert to a float.
215 if (input
== &brw
->vb
.inputs
[VERT_ATTRIB_EDGEFLAG
]) {
216 /* Gen6+ passes edgeflag as sideband along with the vertex, instead
217 * of in the VUE. We have to upload it sideband as the last vertex
218 * element according to the B-Spec.
220 gen6_edgeflag_input
= input
;
224 switch (input
->glarray
->Size
) {
225 case 0: comp0
= BRW_VE1_COMPONENT_STORE_0
;
226 case 1: comp1
= BRW_VE1_COMPONENT_STORE_0
;
227 case 2: comp2
= BRW_VE1_COMPONENT_STORE_0
;
228 case 3: comp3
= input
->glarray
->Integer
? BRW_VE1_COMPONENT_STORE_1_INT
229 : BRW_VE1_COMPONENT_STORE_1_FLT
;
233 /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
235 * "When SourceElementFormat is set to one of the *64*_PASSTHRU
236 * formats, 64-bit components are stored in the URB without any
237 * conversion. In this case, vertex elements must be written as 128
238 * or 256 bits, with VFCOMP_STORE_0 being used to pad the output
239 * as required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
240 * component into the URB, Component 1 must be specified as
241 * VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE)
242 * in order to output a 128-bit vertex element, or Components 1-3 must
243 * be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
244 * element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
245 * to be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
248 if (input
->glarray
->Doubles
) {
249 switch (input
->glarray
->Size
) {
253 /* Use 128-bits instead of 256-bits to write double and dvec2
256 comp2
= BRW_VE1_COMPONENT_NOSTORE
;
257 comp3
= BRW_VE1_COMPONENT_NOSTORE
;
260 /* Pad the output using VFCOMP_STORE_0 as suggested
263 comp3
= BRW_VE1_COMPONENT_STORE_0
;
268 OUT_BATCH((input
->buffer
<< GEN6_VE0_INDEX_SHIFT
) |
270 (format
<< BRW_VE0_FORMAT_SHIFT
) |
271 (input
->offset
<< BRW_VE0_SRC_OFFSET_SHIFT
));
273 OUT_BATCH((comp0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
274 (comp1
<< BRW_VE1_COMPONENT_1_SHIFT
) |
275 (comp2
<< BRW_VE1_COMPONENT_2_SHIFT
) |
276 (comp3
<< BRW_VE1_COMPONENT_3_SHIFT
));
279 if (needs_sgvs_element
) {
280 if (brw
->vs
.prog_data
->uses_basevertex
||
281 brw
->vs
.prog_data
->uses_baseinstance
) {
282 OUT_BATCH(GEN6_VE0_VALID
|
283 brw
->vb
.nr_buffers
<< GEN6_VE0_INDEX_SHIFT
|
284 BRW_SURFACEFORMAT_R32G32_UINT
<< BRW_VE0_FORMAT_SHIFT
);
285 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
286 (BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_1_SHIFT
) |
287 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
288 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
290 OUT_BATCH(GEN6_VE0_VALID
);
291 OUT_BATCH((BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_0_SHIFT
) |
292 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
293 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
294 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
298 if (brw
->vs
.prog_data
->uses_drawid
) {
299 OUT_BATCH(GEN6_VE0_VALID
|
300 ((brw
->vb
.nr_buffers
+ 1) << GEN6_VE0_INDEX_SHIFT
) |
301 (BRW_SURFACEFORMAT_R32_UINT
<< BRW_VE0_FORMAT_SHIFT
));
302 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
303 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
304 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
305 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
308 if (gen6_edgeflag_input
) {
310 brw_get_vertex_surface_type(brw
, gen6_edgeflag_input
->glarray
);
312 OUT_BATCH((gen6_edgeflag_input
->buffer
<< GEN6_VE0_INDEX_SHIFT
) |
314 GEN6_VE0_EDGE_FLAG_ENABLE
|
315 (format
<< BRW_VE0_FORMAT_SHIFT
) |
316 (gen6_edgeflag_input
->offset
<< BRW_VE0_SRC_OFFSET_SHIFT
));
317 OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC
<< BRW_VE1_COMPONENT_0_SHIFT
) |
318 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_1_SHIFT
) |
319 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_2_SHIFT
) |
320 (BRW_VE1_COMPONENT_STORE_0
<< BRW_VE1_COMPONENT_3_SHIFT
));
324 for (unsigned i
= 0, j
= 0; i
< brw
->vb
.nr_enabled
; i
++) {
325 const struct brw_vertex_element
*input
= brw
->vb
.enabled
[i
];
326 const struct brw_vertex_buffer
*buffer
= &brw
->vb
.buffers
[input
->buffer
];
327 unsigned element_index
;
329 /* The edge flag element is reordered to be the last one in the code
330 * above so we need to compensate for that in the element indices used
333 if (input
== gen6_edgeflag_input
)
334 element_index
= nr_elements
- 1;
339 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
340 OUT_BATCH(element_index
|
341 (buffer
->step_rate
? GEN8_VF_INSTANCING_ENABLE
: 0));
342 OUT_BATCH(buffer
->step_rate
);
346 if (brw
->vs
.prog_data
->uses_drawid
) {
347 const unsigned element
= brw
->vb
.nr_enabled
+ needs_sgvs_element
;
349 OUT_BATCH(_3DSTATE_VF_INSTANCING
<< 16 | (3 - 2));
356 const struct brw_tracked_state gen8_vertices
= {
358 .mesa
= _NEW_POLYGON
,
359 .brw
= BRW_NEW_BATCH
|
362 BRW_NEW_VS_PROG_DATA
,
364 .emit
= gen8_emit_vertices
,
368 gen8_emit_index_buffer(struct brw_context
*brw
)
370 const struct _mesa_index_buffer
*index_buffer
= brw
->ib
.ib
;
371 uint32_t mocs_wb
= brw
->gen
>= 9 ? SKL_MOCS_WB
: BDW_MOCS_WB
;
373 if (index_buffer
== NULL
)
377 OUT_BATCH(CMD_INDEX_BUFFER
<< 16 | (5 - 2));
378 OUT_BATCH(brw_get_index_type(index_buffer
->type
) | mocs_wb
);
379 OUT_RELOC64(brw
->ib
.bo
, I915_GEM_DOMAIN_VERTEX
, 0, 0);
380 OUT_BATCH(brw
->ib
.size
);
384 const struct brw_tracked_state gen8_index_buffer
= {
387 .brw
= BRW_NEW_BATCH
|
389 BRW_NEW_INDEX_BUFFER
,
391 .emit
= gen8_emit_index_buffer
,
395 gen8_emit_vf_topology(struct brw_context
*brw
)
398 OUT_BATCH(_3DSTATE_VF_TOPOLOGY
<< 16 | (2 - 2));
399 OUT_BATCH(brw
->primitive
);
403 const struct brw_tracked_state gen8_vf_topology
= {
406 .brw
= BRW_NEW_BLORP
|
409 .emit
= gen8_emit_vf_topology
,