/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

/** @file brw_curbe.c
 *
 * Push constant handling for gen4/5.
 *
 * Push constants are constant values (such as GLSL uniforms) that are
 * pre-loaded into a shader stage's register space at thread spawn time.  On
 * gen4 and gen5, we create a blob in memory containing all the push constants
 * for all the stages in order.  At CMD_CONST_BUFFER time that blob is loaded
 * into URB space as a constant URB entry (CURBE) so that it can be accessed
 * quickly at thread setup time.  Each individual fixed function unit's state
 * (brw_vs_state.c for example) tells the hardware which subset of the CURBE
 * it wants in its register space, and we calculate those areas here under the
 * BRW_NEW_CURBE_OFFSETS state flag.  The brw_urb.c allocation will control
 * how many CURBEs can be loaded into the hardware at once before a pipeline
 * stall occurs at CMD_CONST_BUFFER time.
 *
 * On gen6+, constant handling becomes a much simpler set of per-unit state.
 * See gen6_upload_vec4_push_constants() in gen6_vs_state.c for that code.
 */
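
/* A rough sketch of the blob laid out below (sizes are in 512-bit CURBE
 * units, i.e. 16 floats each, as computed in calculate_curbe_offsets()):
 *
 *    +----------------------+  unit 0
 *    |  WM push constants   |  wm_start .. wm_start + wm_size
 *    +----------------------+
 *    |  clip planes         |  clip_start .. clip_start + clip_size
 *    +----------------------+
 *    |  VS push constants   |  vs_start .. vs_start + vs_size
 *    +----------------------+  total_size
 */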

#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/macros.h"
#include "main/enums.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_statevars.h"
#include "util/bitscan.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_util.h"

/**
 * Partition the CURBE between the various users of constant values.
 *
 * If the users all fit within the previous allocation, we avoid changing
 * the layout, because changing it means re-uploading all unit state and
 * uploading new constant buffers.
 */
static void calculate_curbe_offsets( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
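   /* Each "reg" below is one 512-bit CURBE unit (16 floats); the
    * (nr_params + 15) / 16 computations round each stage's constant count up
    * to whole units.
    */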
   /* BRW_NEW_FS_PROG_DATA */
   const GLuint nr_fp_regs = (brw->wm.base.prog_data->nr_params + 15) / 16;

   /* BRW_NEW_VS_PROG_DATA */
   const GLuint nr_vp_regs = (brw->vs.base.prog_data->nr_params + 15) / 16;
   GLuint nr_clip_regs = 0;
   GLuint total_regs;

   /* _NEW_TRANSFORM */
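   /* Planes are vec4s: six fixed frustum planes (fixed_plane[] below) plus
    * one per enabled user clip plane, again rounded up to 16-float units.
    */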
   if (ctx->Transform.ClipPlanesEnabled) {
      GLuint nr_planes = 6 + _mesa_bitcount(ctx->Transform.ClipPlanesEnabled);
      nr_clip_regs = (nr_planes * 4 + 15) / 16;
   }


   total_regs = nr_fp_regs + nr_vp_regs + nr_clip_regs;

   /* The CURBE allocation size is limited to 32 512-bit units (128 EU
    * registers, or 1024 floats).  See CS_URB_STATE in the gen4 or gen5
    * (volume 1, part 1) PRMs.
    *
    * Note that in brw_fs.cpp we're only loading up to 16 EU registers of
    * values as push constants before spilling to pull constants, and in
    * brw_vec4.cpp we're loading up to 32 registers of push constants.  An EU
    * register is 1/2 of one of these URB entry units, so that leaves us 16 EU
    * regs for clip.
    */
   assert(total_regs <= 32);

   /* Lazy resize:
    */
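   /* Re-layout when a stage has outgrown its slot or the clip allocation
    * changed; only shrink when the total drops below a quarter of a
    * larger-than-16-unit allocation, so minor fluctuations don't keep
    * invalidating unit state.
    */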
   if (nr_fp_regs > brw->curbe.wm_size ||
       nr_vp_regs > brw->curbe.vs_size ||
       nr_clip_regs != brw->curbe.clip_size ||
       (total_regs < brw->curbe.total_size / 4 &&
        brw->curbe.total_size > 16)) {

      GLuint reg = 0;

      /* Calculate a new layout:
       */
      reg = 0;
      brw->curbe.wm_start = reg;
      brw->curbe.wm_size = nr_fp_regs; reg += nr_fp_regs;
      brw->curbe.clip_start = reg;
      brw->curbe.clip_size = nr_clip_regs; reg += nr_clip_regs;
      brw->curbe.vs_start = reg;
      brw->curbe.vs_size = nr_vp_regs; reg += nr_vp_regs;
      brw->curbe.total_size = reg;

      if (0)
         fprintf(stderr, "curbe wm %d+%d clip %d+%d vs %d+%d\n",
                 brw->curbe.wm_start,
                 brw->curbe.wm_size,
                 brw->curbe.clip_start,
                 brw->curbe.clip_size,
                 brw->curbe.vs_start,
                 brw->curbe.vs_size );

      brw->ctx.NewDriverState |= BRW_NEW_CURBE_OFFSETS;
   }
}


const struct brw_tracked_state brw_curbe_offsets = {
   .dirty = {
      .mesa = _NEW_TRANSFORM,
      .brw  = BRW_NEW_CONTEXT |
              BRW_NEW_BLORP |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_VS_PROG_DATA,
   },
   .emit = calculate_curbe_offsets
};


/** Uploads the CS_URB_STATE packet.
 *
 * Just like brw_vs_state.c and brw_wm_state.c define a URB entry size and
 * number of entries for their stages, constant buffers do so using this state
 * packet.  Having multiple CURBEs in the URB at the same time allows the
 * hardware to avoid a pipeline stall between primitives using different
 * constant buffer contents.
 */
void brw_upload_cs_urb_state(struct brw_context *brw)
{
   BEGIN_BATCH(2);
   OUT_BATCH(CMD_CS_URB_STATE << 16 | (2-2));

   /* BRW_NEW_URB_FENCE */
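   /* DW1 packs the CURBE entry allocation size (in 512-bit units, minus one)
    * starting at bit 4, with the number of CS URB entries in the low bits; a
    * zero DW1 allocates no CS URB entries at all.
    */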
   if (brw->urb.csize == 0) {
      OUT_BATCH(0);
   } else {
      /* BRW_NEW_URB_FENCE */
      assert(brw->urb.nr_cs_entries);
      OUT_BATCH((brw->urb.csize - 1) << 4 | brw->urb.nr_cs_entries);
   }
   ADVANCE_BATCH();
}

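/* The six fixed clip-space frustum planes, one (A, B, C, D) row per plane of
 * A*x + B*y + C*z + D*w >= 0; together they clip to -w <= x, y, z <= w.
 */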
static const GLfloat fixed_plane[6][4] = {
   { 0,  0, -1, 1 },
   { 0,  0,  1, 1 },
   { 0, -1,  0, 1 },
   { 0,  1,  0, 1 },
   {-1,  0,  0, 1 },
   { 1,  0,  0, 1 }
};

/**
 * Gathers together all the uniform values into a block of memory to be
 * uploaded into the CURBE, then emits the state packet telling the hardware
 * the new location.
 */
static void
brw_upload_constant_buffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_CURBE_OFFSETS */
   const GLuint sz = brw->curbe.total_size;
   const GLuint bufsz = sz * 16 * sizeof(GLfloat);
   gl_constant_value *buf;
   GLuint i;
   gl_clip_plane *clip_planes;

   if (sz == 0) {
      goto emit;
   }

   buf = intel_upload_space(brw, bufsz, 64,
                            &brw->curbe.curbe_bo, &brw->curbe.curbe_offset);

   STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));

   /* fragment shader constants */
   if (brw->curbe.wm_size) {
      _mesa_load_state_parameters(ctx, brw->fragment_program->Base.Parameters);

      /* BRW_NEW_CURBE_OFFSETS */
      GLuint offset = brw->curbe.wm_start * 16;

      /* BRW_NEW_FS_PROG_DATA | _NEW_PROGRAM_CONSTANTS: copy uniform values */
      for (i = 0; i < brw->wm.base.prog_data->nr_params; i++) {
         buf[offset + i] = *brw->wm.base.prog_data->param[i];
      }
   }

   /* clipper constants */
   if (brw->curbe.clip_size) {
      GLuint offset = brw->curbe.clip_start * 16;
      GLbitfield mask;

      /* If any planes go through the CURBE, they all do: the six fixed
       * frustum planes are loaded first, followed by the user clip planes.
       */
      for (i = 0; i < 6; i++) {
         buf[offset + i * 4 + 0].f = fixed_plane[i][0];
         buf[offset + i * 4 + 1].f = fixed_plane[i][1];
         buf[offset + i * 4 + 2].f = fixed_plane[i][2];
         buf[offset + i * 4 + 3].f = fixed_plane[i][3];
      }

      /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
       * clip-space:
       */
      clip_planes = brw_select_clip_planes(ctx);
      mask = ctx->Transform.ClipPlanesEnabled;
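      /* i keeps counting past the six fixed planes above, so each enabled
       * user clip plane lands immediately after them in the CURBE.
       */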
      while (mask) {
         const int j = u_bit_scan(&mask);
         buf[offset + i * 4 + 0].f = clip_planes[j][0];
         buf[offset + i * 4 + 1].f = clip_planes[j][1];
         buf[offset + i * 4 + 2].f = clip_planes[j][2];
         buf[offset + i * 4 + 3].f = clip_planes[j][3];
         i++;
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      _mesa_load_state_parameters(ctx, brw->vertex_program->Base.Parameters);

      GLuint offset = brw->curbe.vs_start * 16;

      /* BRW_NEW_VS_PROG_DATA | _NEW_PROGRAM_CONSTANTS: copy uniform values */
      for (i = 0; i < brw->vs.base.prog_data->nr_params; i++) {
         buf[offset + i] = *brw->vs.base.prog_data->param[i];
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4)
         fprintf(stderr, "curbe %d.%d: %f %f %f %f\n", i/8, i&4,
                 buf[i+0].f, buf[i+1].f, buf[i+2].f, buf[i+3].f);
   }
   /* Because this packet provokes an action (i.e. copying the constants into
    * the URB), it shouldn't be short-circuited when identical to the previous
    * emission: the URB destination may have changed, for example, or the URB
    * contents may differ from last time.
    *
    * Note that the data referred to is actually copied internally, not just
    * used in place according to the passed pointer.
    *
    * It appears that the CS unit takes care of using each available URB
    * entry (Const URB Entry == CURBE) in turn, and issuing flushes as
    * necessary when double-buffering of CURBEs isn't possible.
    */

 emit:
   /* BRW_NEW_URB_FENCE: From the gen4 PRM, volume 1, section 3.9.8
    * (CONSTANT_BUFFER (CURBE Load)):
    *
    *     "Modifying the CS URB allocation via URB_FENCE invalidates any
    *      previous CURBE entries. Therefore software must subsequently
    *      [re]issue a CONSTANT_BUFFER command before CURBE data can be used
    *      in the pipeline."
    */
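   /* DW0 bit 8 indicates a valid buffer; DW1 is the 64-byte-aligned buffer
    * address with the buffer length, in 512-bit units minus one, packed into
    * the low bits.
    */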
   BEGIN_BATCH(2);
   if (brw->curbe.total_size == 0) {
      OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
      OUT_BATCH(0);
   } else {
      OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
      OUT_RELOC(brw->curbe.curbe_bo,
                I915_GEM_DOMAIN_INSTRUCTION, 0,
                (brw->curbe.total_size - 1) + brw->curbe.curbe_offset);
   }
   ADVANCE_BATCH();

   /* Work around a Broadwater/Crestline depth interpolator bug.  The
    * following sequence will cause GPU hangs:
    *
    * 1. Change state so that all depth related fields in CC_STATE are
    *    disabled, and in WM_STATE, only "PS Use Source Depth" is enabled.
    * 2. Emit a CONSTANT_BUFFER packet.
    * 3. Draw via 3DPRIMITIVE.
    *
    * The recommended workaround is to emit a non-pipelined state change after
    * emitting CONSTANT_BUFFER, in order to drain the windowizer pipeline.
    *
    * We arbitrarily choose 3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP (as it's small),
    * and always emit it when "PS Use Source Depth" is set.  We could be more
    * precise, but the additional complexity is probably not worth it.
    *
    * BRW_NEW_FRAGMENT_PROGRAM
    */
   if (brw->gen == 4 && !brw->is_g4x &&
       (brw->fragment_program->Base.nir->info->inputs_read &
        (1 << VARYING_SLOT_POS))) {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_constant_buffer = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw  = BRW_NEW_BATCH |
              BRW_NEW_BLORP |
              BRW_NEW_CURBE_OFFSETS |
              BRW_NEW_FRAGMENT_PROGRAM |
              BRW_NEW_FS_PROG_DATA |
              BRW_NEW_PSP | /* Implicit - hardware requires this, not used above */
              BRW_NEW_URB_FENCE |
              BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_upload_constant_buffer,
};