2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
33 #include "main/compiler.h"
34 #include "brw_context.h"
37 #include "brw_state.h"
38 #include "program/prog_print.h"
39 #include "program/prog_parameter.h"
41 #include "glsl/ralloc.h"
43 static inline void assign_vue_slot(struct brw_vue_map
*vue_map
,
46 /* Make sure this varying hasn't been assigned a slot already */
47 assert (vue_map
->varying_to_slot
[varying
] == -1);
49 vue_map
->varying_to_slot
[varying
] = vue_map
->num_slots
;
50 vue_map
->slot_to_varying
[vue_map
->num_slots
++] = varying
;
54 * Compute the VUE map for vertex shader program.
56 * Note that consumers of this map using cache keys must include
57 * prog_data->userclip and prog_data->outputs_written in their key
58 * (generated by CACHE_NEW_VS_PROG).
61 brw_compute_vue_map(struct brw_context
*brw
, struct brw_vue_map
*vue_map
,
62 GLbitfield64 slots_valid
, bool userclip_active
)
64 const struct intel_context
*intel
= &brw
->intel
;
66 /* Prior to Gen6, don't assign a slot for VARYING_SLOT_CLIP_VERTEX, since
70 slots_valid
&= ~VARYING_BIT_CLIP_VERTEX
;
72 vue_map
->slots_valid
= slots_valid
;
75 /* Make sure that the values we store in vue_map->varying_to_slot and
76 * vue_map->slot_to_varying won't overflow the signed chars that are used
77 * to store them. Note that since vue_map->slot_to_varying sometimes holds
78 * values equal to BRW_VARYING_SLOT_COUNT, we need to ensure that
79 * BRW_VARYING_SLOT_COUNT is <= 127, not 128.
81 STATIC_ASSERT(BRW_VARYING_SLOT_COUNT
<= 127);
83 vue_map
->num_slots
= 0;
84 for (i
= 0; i
< BRW_VARYING_SLOT_COUNT
; ++i
) {
85 vue_map
->varying_to_slot
[i
] = -1;
86 vue_map
->slot_to_varying
[i
] = BRW_VARYING_SLOT_COUNT
;
89 /* VUE header: format depends on chip generation and whether clipping is
94 /* There are 8 dwords in VUE header pre-Ironlake:
95 * dword 0-3 is indices, point width, clip flags.
96 * dword 4-7 is ndc position
97 * dword 8-11 is the first vertex data.
99 assign_vue_slot(vue_map
, VARYING_SLOT_PSIZ
);
100 assign_vue_slot(vue_map
, BRW_VARYING_SLOT_NDC
);
101 assign_vue_slot(vue_map
, VARYING_SLOT_POS
);
104 /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
105 * dword 0-3 of the header is indices, point width, clip flags.
106 * dword 4-7 is the ndc position
107 * dword 8-11 of the vertex header is the 4D space position
108 * dword 12-19 of the vertex header is the user clip distance.
109 * dword 20-23 is a pad so that the vertex element data is aligned
110 * dword 24-27 is the first vertex data we fill.
112 * Note: future pipeline stages expect 4D space position to be
113 * contiguous with the other varyings, so we make dword 24-27 a
114 * duplicate copy of the 4D space position.
116 assign_vue_slot(vue_map
, VARYING_SLOT_PSIZ
);
117 assign_vue_slot(vue_map
, BRW_VARYING_SLOT_NDC
);
118 assign_vue_slot(vue_map
, BRW_VARYING_SLOT_POS_DUPLICATE
);
119 assign_vue_slot(vue_map
, VARYING_SLOT_CLIP_DIST0
);
120 assign_vue_slot(vue_map
, VARYING_SLOT_CLIP_DIST1
);
121 assign_vue_slot(vue_map
, BRW_VARYING_SLOT_PAD
);
122 assign_vue_slot(vue_map
, VARYING_SLOT_POS
);
126 /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
127 * dword 0-3 of the header is indices, point width, clip flags.
128 * dword 4-7 is the 4D space position
129 * dword 8-15 of the vertex header is the user clip distance if
131 * dword 8-11 or 16-19 is the first vertex element data we fill.
133 assign_vue_slot(vue_map
, VARYING_SLOT_PSIZ
);
134 assign_vue_slot(vue_map
, VARYING_SLOT_POS
);
135 if (userclip_active
) {
136 assign_vue_slot(vue_map
, VARYING_SLOT_CLIP_DIST0
);
137 assign_vue_slot(vue_map
, VARYING_SLOT_CLIP_DIST1
);
139 /* front and back colors need to be consecutive so that we can use
140 * ATTRIBUTE_SWIZZLE_INPUTATTR_FACING to swizzle them when doing
143 if (slots_valid
& BITFIELD64_BIT(VARYING_SLOT_COL0
))
144 assign_vue_slot(vue_map
, VARYING_SLOT_COL0
);
145 if (slots_valid
& BITFIELD64_BIT(VARYING_SLOT_BFC0
))
146 assign_vue_slot(vue_map
, VARYING_SLOT_BFC0
);
147 if (slots_valid
& BITFIELD64_BIT(VARYING_SLOT_COL1
))
148 assign_vue_slot(vue_map
, VARYING_SLOT_COL1
);
149 if (slots_valid
& BITFIELD64_BIT(VARYING_SLOT_BFC1
))
150 assign_vue_slot(vue_map
, VARYING_SLOT_BFC1
);
153 assert (!"VUE map not known for this chip generation");
157 /* The hardware doesn't care about the rest of the vertex outputs, so just
158 * assign them contiguously. Don't reassign outputs that already have a
161 * We generally don't need to assign a slot for VARYING_SLOT_CLIP_VERTEX,
162 * since it's encoded as the clip distances by emit_clip_distances().
163 * However, it may be output by transform feedback, and we'd rather not
164 * recompute state when TF changes, so we just always include it.
166 for (int i
= 0; i
< VARYING_SLOT_MAX
; ++i
) {
167 if ((slots_valid
& BITFIELD64_BIT(i
)) &&
168 vue_map
->varying_to_slot
[i
] == -1) {
169 assign_vue_slot(vue_map
, i
);
176 * Decide which set of clip planes should be used when clipping via
177 * gl_Position or gl_ClipVertex.
179 gl_clip_plane
*brw_select_clip_planes(struct gl_context
*ctx
)
181 if (ctx
->Shader
.CurrentVertexProgram
) {
182 /* There is currently a GLSL vertex shader, so clip according to GLSL
183 * rules, which means compare gl_ClipVertex (or gl_Position, if
184 * gl_ClipVertex wasn't assigned) against the eye-coordinate clip planes
185 * that were stored in EyeUserPlane at the time the clip planes were
188 return ctx
->Transform
.EyeUserPlane
;
190 /* Either we are using fixed function or an ARB vertex program. In
191 * either case the clip planes are going to be compared against
192 * gl_Position (which is in clip coordinates) so we have to clip using
193 * _ClipUserPlane, which was transformed into clip coordinates by Mesa
196 return ctx
->Transform
._ClipUserPlane
;
202 brw_vec4_prog_data_compare(const struct brw_vec4_prog_data
*a
,
203 const struct brw_vec4_prog_data
*b
)
205 /* Compare all the struct up to the pointers. */
206 if (memcmp(a
, b
, offsetof(struct brw_vec4_prog_data
, param
)))
209 if (memcmp(a
->param
, b
->param
, a
->nr_params
* sizeof(void *)))
212 if (memcmp(a
->pull_param
, b
->pull_param
, a
->nr_pull_params
* sizeof(void *)))
220 brw_vs_prog_data_compare(const void *in_a
, const void *in_b
,
221 int aux_size
, const void *in_key
)
223 const struct brw_vs_prog_data
*a
= in_a
;
224 const struct brw_vs_prog_data
*b
= in_b
;
226 /* Compare the base vec4 structure. */
227 if (!brw_vec4_prog_data_compare(&a
->base
, &b
->base
))
230 /* Compare the rest of the struct. */
231 const unsigned offset
= sizeof(struct brw_vec4_prog_data
);
232 if (memcmp(((char *) &a
) + offset
, ((char *) &b
) + offset
,
233 sizeof(struct brw_vs_prog_data
) - offset
)) {
241 do_vs_prog(struct brw_context
*brw
,
242 struct gl_shader_program
*prog
,
243 struct brw_vertex_program
*vp
,
244 struct brw_vs_prog_key
*key
)
246 struct intel_context
*intel
= &brw
->intel
;
248 const GLuint
*program
;
249 struct brw_vs_compile c
;
250 struct brw_vs_prog_data prog_data
;
253 struct gl_shader
*vs
= NULL
;
256 vs
= prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
258 memset(&c
, 0, sizeof(c
));
259 memcpy(&c
.key
, key
, sizeof(*key
));
260 memset(&prog_data
, 0, sizeof(prog_data
));
262 mem_ctx
= ralloc_context(NULL
);
266 /* Allocate the references to the uniforms that will end up in the
267 * prog_data associated with the compiled program, and which will be freed
268 * by the state cache.
272 /* We add padding around uniform values below vec4 size, with the worst
273 * case being a float value that gets blown up to a vec4, so be
276 param_count
= vs
->num_uniform_components
* 4;
279 param_count
= vp
->program
.Base
.Parameters
->NumParameters
* 4;
281 /* We also upload clip plane data as uniforms */
282 param_count
+= MAX_CLIP_PLANES
* 4;
284 prog_data
.base
.param
= rzalloc_array(NULL
, const float *, param_count
);
285 prog_data
.base
.pull_param
= rzalloc_array(NULL
, const float *, param_count
);
287 GLbitfield64 outputs_written
= vp
->program
.Base
.OutputsWritten
;
288 prog_data
.inputs_read
= vp
->program
.Base
.InputsRead
;
290 if (c
.key
.copy_edgeflag
) {
291 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_EDGE
);
292 prog_data
.inputs_read
|= VERT_BIT_EDGEFLAG
;
295 if (intel
->gen
< 6) {
296 /* Put dummy slots into the VUE for the SF to put the replaced
297 * point sprite coords in. We shouldn't need these dummy slots,
298 * which take up precious URB space, but it would mean that the SF
299 * doesn't get nice aligned pairs of input coords into output
300 * coords, which would be a pain to handle.
302 for (i
= 0; i
< 8; i
++) {
303 if (c
.key
.point_coord_replace
& (1 << i
))
304 outputs_written
|= BITFIELD64_BIT(VARYING_SLOT_TEX0
+ i
);
308 brw_compute_vue_map(brw
, &prog_data
.base
.vue_map
, outputs_written
,
309 c
.key
.base
.userclip_active
);
312 _mesa_fprint_program_opt(stdout
, &c
.vp
->program
.Base
, PROG_PRINT_DEBUG
,
318 program
= brw_vs_emit(brw
, prog
, &c
, &prog_data
, mem_ctx
, &program_size
);
319 if (program
== NULL
) {
320 ralloc_free(mem_ctx
);
324 if (prog_data
.base
.nr_pull_params
)
325 prog_data
.base
.num_surfaces
= 1;
326 if (c
.vp
->program
.Base
.SamplersUsed
)
327 prog_data
.base
.num_surfaces
= SURF_INDEX_VS_TEXTURE(BRW_MAX_TEX_UNIT
);
329 prog
->_LinkedShaders
[MESA_SHADER_VERTEX
]->NumUniformBlocks
) {
330 prog_data
.base
.num_surfaces
=
331 SURF_INDEX_VS_UBO(prog
->_LinkedShaders
[MESA_SHADER_VERTEX
]->NumUniformBlocks
);
334 /* Scratch space is used for register spilling */
335 if (c
.base
.last_scratch
) {
336 perf_debug("Vertex shader triggered register spilling. "
337 "Try reducing the number of live vec4 values to "
338 "improve performance.\n");
340 prog_data
.base
.total_scratch
341 = brw_get_scratch_size(c
.base
.last_scratch
*REG_SIZE
);
343 brw_get_scratch_bo(intel
, &brw
->vs
.scratch_bo
,
344 prog_data
.base
.total_scratch
* brw
->max_vs_threads
);
347 brw_upload_cache(&brw
->cache
, BRW_VS_PROG
,
348 &c
.key
, sizeof(c
.key
),
349 program
, program_size
,
350 &prog_data
, sizeof(prog_data
),
351 &brw
->vs
.prog_offset
, &brw
->vs
.prog_data
);
352 ralloc_free(mem_ctx
);
/**
 * Emit a perf_debug line when a key field changed between compiles.
 *
 * Returns true iff the old (a) and new (b) values differ, so callers can
 * OR the results together to know whether any field accounted for the
 * recompile.
 */
static bool
key_debug(struct intel_context *intel, const char *name, int a, int b)
{
   if (a != b) {
      perf_debug("  %s %d->%d\n", name, a, b);
      return true;
   }
   return false;
}
368 brw_vs_debug_recompile(struct brw_context
*brw
,
369 struct gl_shader_program
*prog
,
370 const struct brw_vs_prog_key
*key
)
372 struct intel_context
*intel
= &brw
->intel
;
373 struct brw_cache_item
*c
= NULL
;
374 const struct brw_vs_prog_key
*old_key
= NULL
;
377 perf_debug("Recompiling vertex shader for program %d\n", prog
->Name
);
379 for (unsigned int i
= 0; i
< brw
->cache
.size
; i
++) {
380 for (c
= brw
->cache
.items
[i
]; c
; c
= c
->next
) {
381 if (c
->cache_id
== BRW_VS_PROG
) {
384 if (old_key
->base
.program_string_id
== key
->base
.program_string_id
)
393 perf_debug(" Didn't find previous compile in the shader cache for "
398 for (unsigned int i
= 0; i
< VERT_ATTRIB_MAX
; i
++) {
399 found
|= key_debug(intel
, "Vertex attrib w/a flags",
400 old_key
->gl_attrib_wa_flags
[i
],
401 key
->gl_attrib_wa_flags
[i
]);
404 found
|= key_debug(intel
, "user clip flags",
405 old_key
->base
.userclip_active
, key
->base
.userclip_active
);
407 found
|= key_debug(intel
, "user clipping planes as push constants",
408 old_key
->base
.nr_userclip_plane_consts
,
409 key
->base
.nr_userclip_plane_consts
);
411 found
|= key_debug(intel
, "clip distance enable",
412 old_key
->base
.uses_clip_distance
, key
->base
.uses_clip_distance
);
413 found
|= key_debug(intel
, "clip plane enable bitfield",
414 old_key
->base
.userclip_planes_enabled_gen_4_5
,
415 key
->base
.userclip_planes_enabled_gen_4_5
);
416 found
|= key_debug(intel
, "copy edgeflag",
417 old_key
->copy_edgeflag
, key
->copy_edgeflag
);
418 found
|= key_debug(intel
, "PointCoord replace",
419 old_key
->point_coord_replace
, key
->point_coord_replace
);
420 found
|= key_debug(intel
, "vertex color clamping",
421 old_key
->base
.clamp_vertex_color
, key
->base
.clamp_vertex_color
);
423 found
|= brw_debug_recompile_sampler_key(intel
, &old_key
->base
.tex
,
427 perf_debug(" Something else\n");
431 static void brw_upload_vs_prog(struct brw_context
*brw
)
433 struct intel_context
*intel
= &brw
->intel
;
434 struct gl_context
*ctx
= &intel
->ctx
;
435 struct brw_vs_prog_key key
;
436 /* BRW_NEW_VERTEX_PROGRAM */
437 struct brw_vertex_program
*vp
=
438 (struct brw_vertex_program
*)brw
->vertex_program
;
439 struct gl_program
*prog
= (struct gl_program
*) brw
->vertex_program
;
442 memset(&key
, 0, sizeof(key
));
444 /* Just upload the program verbatim for now. Always send it all
445 * the inputs it asks for, whether they are varying or not.
447 key
.base
.program_string_id
= vp
->id
;
448 key
.base
.userclip_active
= (ctx
->Transform
.ClipPlanesEnabled
!= 0);
449 key
.base
.uses_clip_distance
= vp
->program
.UsesClipDistance
;
450 if (key
.base
.userclip_active
&& !key
.base
.uses_clip_distance
) {
451 if (intel
->gen
< 6) {
452 key
.base
.nr_userclip_plane_consts
453 = _mesa_bitcount_64(ctx
->Transform
.ClipPlanesEnabled
);
454 key
.base
.userclip_planes_enabled_gen_4_5
455 = ctx
->Transform
.ClipPlanesEnabled
;
457 key
.base
.nr_userclip_plane_consts
458 = _mesa_logbase2(ctx
->Transform
.ClipPlanesEnabled
) + 1;
463 if (intel
->gen
< 6) {
464 key
.copy_edgeflag
= (ctx
->Polygon
.FrontMode
!= GL_FILL
||
465 ctx
->Polygon
.BackMode
!= GL_FILL
);
468 /* _NEW_LIGHT | _NEW_BUFFERS */
469 key
.base
.clamp_vertex_color
= ctx
->Light
._ClampVertexColor
;
472 if (intel
->gen
< 6 && ctx
->Point
.PointSprite
) {
473 for (i
= 0; i
< 8; i
++) {
474 if (ctx
->Point
.CoordReplace
[i
])
475 key
.point_coord_replace
|= (1 << i
);
480 brw_populate_sampler_prog_key_data(ctx
, prog
, &key
.base
.tex
);
482 /* BRW_NEW_VERTICES */
483 if (intel
->gen
< 8 && !intel
->is_haswell
) {
484 /* Prior to Haswell, the hardware can't natively support GL_FIXED or
485 * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
487 for (i
= 0; i
< VERT_ATTRIB_MAX
; i
++) {
488 if (!(vp
->program
.Base
.InputsRead
& BITFIELD64_BIT(i
)))
491 uint8_t wa_flags
= 0;
493 switch (brw
->vb
.inputs
[i
].glarray
->Type
) {
496 wa_flags
= brw
->vb
.inputs
[i
].glarray
->Size
;
499 case GL_INT_2_10_10_10_REV
:
500 wa_flags
|= BRW_ATTRIB_WA_SIGN
;
503 case GL_UNSIGNED_INT_2_10_10_10_REV
:
504 if (brw
->vb
.inputs
[i
].glarray
->Format
== GL_BGRA
)
505 wa_flags
|= BRW_ATTRIB_WA_BGRA
;
507 if (brw
->vb
.inputs
[i
].glarray
->Normalized
)
508 wa_flags
|= BRW_ATTRIB_WA_NORMALIZE
;
509 else if (!brw
->vb
.inputs
[i
].glarray
->Integer
)
510 wa_flags
|= BRW_ATTRIB_WA_SCALE
;
515 key
.gl_attrib_wa_flags
[i
] = wa_flags
;
519 if (!brw_search_cache(&brw
->cache
, BRW_VS_PROG
,
521 &brw
->vs
.prog_offset
, &brw
->vs
.prog_data
)) {
522 bool success
= do_vs_prog(brw
, ctx
->Shader
.CurrentVertexProgram
,
527 if (memcmp(&brw
->vs
.prog_data
->base
.vue_map
, &brw
->vue_map_geom_out
,
528 sizeof(brw
->vue_map_geom_out
)) != 0) {
529 brw
->vue_map_geom_out
= brw
->vs
.prog_data
->base
.vue_map
;
530 brw
->state
.dirty
.brw
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
536 const struct brw_tracked_state brw_vs_prog
= {
538 .mesa
= (_NEW_TRANSFORM
| _NEW_POLYGON
| _NEW_POINT
| _NEW_LIGHT
|
541 .brw
= (BRW_NEW_VERTEX_PROGRAM
|
545 .emit
= brw_upload_vs_prog
549 brw_vs_precompile(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
551 struct brw_context
*brw
= brw_context(ctx
);
552 struct brw_vs_prog_key key
;
553 uint32_t old_prog_offset
= brw
->vs
.prog_offset
;
554 struct brw_vs_prog_data
*old_prog_data
= brw
->vs
.prog_data
;
557 if (!prog
->_LinkedShaders
[MESA_SHADER_VERTEX
])
560 struct gl_vertex_program
*vp
= (struct gl_vertex_program
*)
561 prog
->_LinkedShaders
[MESA_SHADER_VERTEX
]->Program
;
562 struct brw_vertex_program
*bvp
= brw_vertex_program(vp
);
564 memset(&key
, 0, sizeof(key
));
566 key
.base
.program_string_id
= bvp
->id
;
567 key
.base
.clamp_vertex_color
= ctx
->API
== API_OPENGL_COMPAT
;
569 for (int i
= 0; i
< MAX_SAMPLERS
; i
++) {
570 if (vp
->Base
.ShadowSamplers
& (1 << i
)) {
571 /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
572 key
.base
.tex
.swizzles
[i
] =
573 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_X
, SWIZZLE_ONE
);
575 /* Color sampler: assume no swizzling. */
576 key
.base
.tex
.swizzles
[i
] = SWIZZLE_XYZW
;
580 success
= do_vs_prog(brw
, prog
, bvp
, &key
);
582 brw
->vs
.prog_offset
= old_prog_offset
;
583 brw
->vs
.prog_data
= old_prog_data
;
590 brw_vec4_prog_data_free(const struct brw_vec4_prog_data
*prog_data
)
592 ralloc_free((void *)prog_data
->param
);
593 ralloc_free((void *)prog_data
->pull_param
);
598 brw_vs_prog_data_free(const void *in_prog_data
)
600 const struct brw_vs_prog_data
*prog_data
= in_prog_data
;
602 brw_vec4_prog_data_free(&prog_data
->base
);