i965/vec4: Ignore swizzle of VGRF for use by var_range_end().
[mesa.git] / src / mesa / drivers / dri / i965 / brw_curbe.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
/** @file brw_curbe.c
 *
 * Push constant handling for gen4/5.
 *
 * Push constants are constant values (such as GLSL uniforms) that are
 * pre-loaded into a shader stage's register space at thread spawn time.  On
 * gen4 and gen5, we create a blob in memory containing all the push constants
 * for all the stages in order.  At CMD_CONST_BUFFER time that blob is loaded
 * into URB space as a constant URB entry (CURBE) so that it can be accessed
 * quickly at thread setup time.  Each individual fixed function unit's state
 * (brw_vs_state.c for example) tells the hardware which subset of the CURBE
 * it wants in its register space, and we calculate those areas here under the
 * BRW_NEW_CURBE_OFFSETS state flag.  The brw_urb.c allocation will control
 * how many CURBEs can be loaded into the hardware at once before a pipeline
 * stall occurs at CMD_CONST_BUFFER time.
 *
 * On gen6+, constant handling becomes a much simpler set of per-unit state.
 * See gen6_upload_vec4_push_constants() in gen6_vs_state.c for that code.
 */
51
52
53 #include "main/context.h"
54 #include "main/macros.h"
55 #include "main/enums.h"
56 #include "program/prog_parameter.h"
57 #include "program/prog_print.h"
58 #include "program/prog_statevars.h"
59 #include "util/bitscan.h"
60 #include "intel_batchbuffer.h"
61 #include "intel_buffer_objects.h"
62 #include "brw_context.h"
63 #include "brw_defines.h"
64 #include "brw_state.h"
65 #include "brw_util.h"
66
67
68 /**
69 * Partition the CURBE between the various users of constant values.
70 *
71 * If the users all fit within the previous allocatation, we avoid changing
72 * the layout because that means reuploading all unit state and uploading new
73 * constant buffers.
74 */
75 static void calculate_curbe_offsets( struct brw_context *brw )
76 {
77 struct gl_context *ctx = &brw->ctx;
78 /* BRW_NEW_FS_PROG_DATA */
79 const GLuint nr_fp_regs = (brw->wm.prog_data->base.nr_params + 15) / 16;
80
81 /* BRW_NEW_VS_PROG_DATA */
82 const GLuint nr_vp_regs = (brw->vs.prog_data->base.base.nr_params + 15) / 16;
83 GLuint nr_clip_regs = 0;
84 GLuint total_regs;
85
86 /* _NEW_TRANSFORM */
87 if (ctx->Transform.ClipPlanesEnabled) {
88 GLuint nr_planes = 6 + _mesa_bitcount(ctx->Transform.ClipPlanesEnabled);
89 nr_clip_regs = (nr_planes * 4 + 15) / 16;
90 }
91
92
93 total_regs = nr_fp_regs + nr_vp_regs + nr_clip_regs;
94
95 /* The CURBE allocation size is limited to 32 512-bit units (128 EU
96 * registers, or 1024 floats). See CS_URB_STATE in the gen4 or gen5
97 * (volume 1, part 1) PRMs.
98 *
99 * Note that in brw_fs.cpp we're only loading up to 16 EU registers of
100 * values as push constants before spilling to pull constants, and in
101 * brw_vec4.cpp we're loading up to 32 registers of push constants. An EU
102 * register is 1/2 of one of these URB entry units, so that leaves us 16 EU
103 * regs for clip.
104 */
105 assert(total_regs <= 32);
106
107 /* Lazy resize:
108 */
109 if (nr_fp_regs > brw->curbe.wm_size ||
110 nr_vp_regs > brw->curbe.vs_size ||
111 nr_clip_regs != brw->curbe.clip_size ||
112 (total_regs < brw->curbe.total_size / 4 &&
113 brw->curbe.total_size > 16)) {
114
115 GLuint reg = 0;
116
117 /* Calculate a new layout:
118 */
119 reg = 0;
120 brw->curbe.wm_start = reg;
121 brw->curbe.wm_size = nr_fp_regs; reg += nr_fp_regs;
122 brw->curbe.clip_start = reg;
123 brw->curbe.clip_size = nr_clip_regs; reg += nr_clip_regs;
124 brw->curbe.vs_start = reg;
125 brw->curbe.vs_size = nr_vp_regs; reg += nr_vp_regs;
126 brw->curbe.total_size = reg;
127
128 if (0)
129 fprintf(stderr, "curbe wm %d+%d clip %d+%d vs %d+%d\n",
130 brw->curbe.wm_start,
131 brw->curbe.wm_size,
132 brw->curbe.clip_start,
133 brw->curbe.clip_size,
134 brw->curbe.vs_start,
135 brw->curbe.vs_size );
136
137 brw->ctx.NewDriverState |= BRW_NEW_CURBE_OFFSETS;
138 }
139 }
140
141
142 const struct brw_tracked_state brw_curbe_offsets = {
143 .dirty = {
144 .mesa = _NEW_TRANSFORM,
145 .brw = BRW_NEW_CONTEXT |
146 BRW_NEW_BLORP |
147 BRW_NEW_FS_PROG_DATA |
148 BRW_NEW_VS_PROG_DATA,
149 },
150 .emit = calculate_curbe_offsets
151 };
152
153
154
155
156 /** Uploads the CS_URB_STATE packet.
157 *
158 * Just like brw_vs_state.c and brw_wm_state.c define a URB entry size and
159 * number of entries for their stages, constant buffers do so using this state
160 * packet. Having multiple CURBEs in the URB at the same time allows the
161 * hardware to avoid a pipeline stall between primitives using different
162 * constant buffer contents.
163 */
164 void brw_upload_cs_urb_state(struct brw_context *brw)
165 {
166 BEGIN_BATCH(2);
167 OUT_BATCH(CMD_CS_URB_STATE << 16 | (2-2));
168
169 /* BRW_NEW_URB_FENCE */
170 if (brw->urb.csize == 0) {
171 OUT_BATCH(0);
172 } else {
173 /* BRW_NEW_URB_FENCE */
174 assert(brw->urb.nr_cs_entries);
175 OUT_BATCH((brw->urb.csize - 1) << 4 | brw->urb.nr_cs_entries);
176 }
177 ADVANCE_BATCH();
178 }
179
180 static const GLfloat fixed_plane[6][4] = {
181 { 0, 0, -1, 1 },
182 { 0, 0, 1, 1 },
183 { 0, -1, 0, 1 },
184 { 0, 1, 0, 1 },
185 {-1, 0, 0, 1 },
186 { 1, 0, 0, 1 }
187 };
188
189 /**
190 * Gathers together all the uniform values into a block of memory to be
191 * uploaded into the CURBE, then emits the state packet telling the hardware
192 * the new location.
193 */
194 static void
195 brw_upload_constant_buffer(struct brw_context *brw)
196 {
197 struct gl_context *ctx = &brw->ctx;
198 /* BRW_NEW_CURBE_OFFSETS */
199 const GLuint sz = brw->curbe.total_size;
200 const GLuint bufsz = sz * 16 * sizeof(GLfloat);
201 gl_constant_value *buf;
202 GLuint i;
203 gl_clip_plane *clip_planes;
204
205 if (sz == 0) {
206 goto emit;
207 }
208
209 buf = intel_upload_space(brw, bufsz, 64,
210 &brw->curbe.curbe_bo, &brw->curbe.curbe_offset);
211
212 STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));
213
214 /* fragment shader constants */
215 if (brw->curbe.wm_size) {
216 _mesa_load_state_parameters(ctx, brw->fragment_program->Base.Parameters);
217
218 /* BRW_NEW_CURBE_OFFSETS */
219 GLuint offset = brw->curbe.wm_start * 16;
220
221 /* BRW_NEW_FS_PROG_DATA | _NEW_PROGRAM_CONSTANTS: copy uniform values */
222 for (i = 0; i < brw->wm.prog_data->base.nr_params; i++) {
223 buf[offset + i] = *brw->wm.prog_data->base.param[i];
224 }
225 }
226
227 /* clipper constants */
228 if (brw->curbe.clip_size) {
229 GLuint offset = brw->curbe.clip_start * 16;
230 GLbitfield mask;
231
232 /* If any planes are going this way, send them all this way:
233 */
234 for (i = 0; i < 6; i++) {
235 buf[offset + i * 4 + 0].f = fixed_plane[i][0];
236 buf[offset + i * 4 + 1].f = fixed_plane[i][1];
237 buf[offset + i * 4 + 2].f = fixed_plane[i][2];
238 buf[offset + i * 4 + 3].f = fixed_plane[i][3];
239 }
240
241 /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
242 * clip-space:
243 */
244 clip_planes = brw_select_clip_planes(ctx);
245 mask = ctx->Transform.ClipPlanesEnabled;
246 while (mask) {
247 const int j = u_bit_scan(&mask);
248 buf[offset + i * 4 + 0].f = clip_planes[j][0];
249 buf[offset + i * 4 + 1].f = clip_planes[j][1];
250 buf[offset + i * 4 + 2].f = clip_planes[j][2];
251 buf[offset + i * 4 + 3].f = clip_planes[j][3];
252 i++;
253 }
254 }
255
256 /* vertex shader constants */
257 if (brw->curbe.vs_size) {
258 _mesa_load_state_parameters(ctx, brw->vertex_program->Base.Parameters);
259
260 GLuint offset = brw->curbe.vs_start * 16;
261
262 /* BRW_NEW_VS_PROG_DATA | _NEW_PROGRAM_CONSTANTS: copy uniform values */
263 for (i = 0; i < brw->vs.prog_data->base.base.nr_params; i++) {
264 buf[offset + i] = *brw->vs.prog_data->base.base.param[i];
265 }
266 }
267
268 if (0) {
269 for (i = 0; i < sz*16; i+=4)
270 fprintf(stderr, "curbe %d.%d: %f %f %f %f\n", i/8, i&4,
271 buf[i+0].f, buf[i+1].f, buf[i+2].f, buf[i+3].f);
272 }
273
274 /* Because this provokes an action (ie copy the constants into the
275 * URB), it shouldn't be shortcircuited if identical to the
276 * previous time - because eg. the urb destination may have
277 * changed, or the urb contents different to last time.
278 *
279 * Note that the data referred to is actually copied internally,
280 * not just used in place according to passed pointer.
281 *
282 * It appears that the CS unit takes care of using each available
283 * URB entry (Const URB Entry == CURBE) in turn, and issuing
284 * flushes as necessary when doublebuffering of CURBEs isn't
285 * possible.
286 */
287
288 emit:
289 /* BRW_NEW_URB_FENCE: From the gen4 PRM, volume 1, section 3.9.8
290 * (CONSTANT_BUFFER (CURBE Load)):
291 *
292 * "Modifying the CS URB allocation via URB_FENCE invalidates any
293 * previous CURBE entries. Therefore software must subsequently
294 * [re]issue a CONSTANT_BUFFER command before CURBE data can be used
295 * in the pipeline."
296 */
297 BEGIN_BATCH(2);
298 if (brw->curbe.total_size == 0) {
299 OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
300 OUT_BATCH(0);
301 } else {
302 OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
303 OUT_RELOC(brw->curbe.curbe_bo,
304 I915_GEM_DOMAIN_INSTRUCTION, 0,
305 (brw->curbe.total_size - 1) + brw->curbe.curbe_offset);
306 }
307 ADVANCE_BATCH();
308
309 /* Work around a Broadwater/Crestline depth interpolator bug. The
310 * following sequence will cause GPU hangs:
311 *
312 * 1. Change state so that all depth related fields in CC_STATE are
313 * disabled, and in WM_STATE, only "PS Use Source Depth" is enabled.
314 * 2. Emit a CONSTANT_BUFFER packet.
315 * 3. Draw via 3DPRIMITIVE.
316 *
317 * The recommended workaround is to emit a non-pipelined state change after
318 * emitting CONSTANT_BUFFER, in order to drain the windowizer pipeline.
319 *
320 * We arbitrarily choose 3DSTATE_GLOBAL_DEPTH_CLAMP_OFFSET (as it's small),
321 * and always emit it when "PS Use Source Depth" is set. We could be more
322 * precise, but the additional complexity is probably not worth it.
323 *
324 * BRW_NEW_FRAGMENT_PROGRAM
325 */
326 if (brw->gen == 4 && !brw->is_g4x &&
327 (brw->fragment_program->Base.InputsRead & (1 << VARYING_SLOT_POS))) {
328 BEGIN_BATCH(2);
329 OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
330 OUT_BATCH(0);
331 ADVANCE_BATCH();
332 }
333 }
334
335 const struct brw_tracked_state brw_constant_buffer = {
336 .dirty = {
337 .mesa = _NEW_PROGRAM_CONSTANTS,
338 .brw = BRW_NEW_BATCH |
339 BRW_NEW_BLORP |
340 BRW_NEW_CURBE_OFFSETS |
341 BRW_NEW_FRAGMENT_PROGRAM |
342 BRW_NEW_FS_PROG_DATA |
343 BRW_NEW_PSP | /* Implicit - hardware requires this, not used above */
344 BRW_NEW_URB_FENCE |
345 BRW_NEW_VS_PROG_DATA,
346 },
347 .emit = brw_upload_constant_buffer,
348 };
349