src/mesa/drivers/dri/i965/gen7_urb.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "intel_batchbuffer.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
/**
 * The following diagram shows how we partition the URB:
 *
 *        16kB or 32kB               Rest of the URB space
 *   __________-__________   _________________-_________________
 *  /                      \ /                                   \
 * +-------------------------------------------------------------+
 * |  VS/HS/DS/GS/FS Push  |           VS/HS/DS/GS URB           |
 * |       Constants       |               Entries               |
 * +-------------------------------------------------------------+
 *
 * Notably, push constants must be stored at the beginning of the URB
 * space, while entries can be stored anywhere.  Ivybridge and Haswell
 * GT1/GT2 have a maximum constant buffer size of 16kB, while Haswell GT3
 * doubles this (32kB).
 *
 * Ivybridge and Haswell GT1/GT2 allow push constants to be located (and
 * sized) in increments of 1kB.  Haswell GT3 requires them to be located and
 * sized in increments of 2kB.
 *
 * Currently we split the constant buffer space evenly among whatever stages
 * are active.  This is probably not ideal, but simple.
 *
 * Ivybridge GT1 and Haswell GT1 have 128kB of URB space.
 * Ivybridge GT2 and Haswell GT2 have 256kB of URB space.
 * Haswell GT3 has 512kB of URB space.
 *
 * See "Volume 2a: 3D Pipeline," section 1.8, "Volume 1b: Configurations",
 * and the documentation for 3DSTATE_PUSH_CONSTANT_ALLOC_xS.
 */
static void
gen7_allocate_push_constants(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool gs_present = brw->geometry_program;

   /* BRW_NEW_TESS_PROGRAMS */
   bool tess_present = brw->tess_eval_program;

   unsigned avail_size = 16;
   unsigned multiplier =
      (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 2 : 1;

   int stages = 2 + gs_present + 2 * tess_present;

   /* Divide up the available space equally between stages.  Because we
    * round down (using floor division), there may be some left over
    * space.  We allocate that to the pixel shader stage.
    */
   unsigned size_per_stage = avail_size / stages;

   unsigned vs_size = size_per_stage;
   unsigned hs_size = tess_present ? size_per_stage : 0;
   unsigned ds_size = tess_present ? size_per_stage : 0;
   unsigned gs_size = gs_present ? size_per_stage : 0;
   unsigned fs_size = avail_size - size_per_stage * (stages - 1);
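
   /* As an illustration of the split (sizes here are in kB, before the
    * multiplier is applied):
    *
    *  - VS + FS only on Ivybridge: stages = 2, size_per_stage = 16 / 2 = 8,
    *    so the VS and FS each get 8kB.
    *  - All five stages on Haswell GT3: stages = 5, size_per_stage = 3,
    *    fs_size = 16 - 3 * 4 = 4; with the 2x multiplier the VS/HS/DS/GS
    *    each get 6kB and the FS gets 8kB, for the full 32kB.
    */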

   gen7_emit_push_constant_state(brw, multiplier * vs_size,
                                 multiplier * hs_size, multiplier * ds_size,
                                 multiplier * gs_size, multiplier * fs_size);

   /* From p115 of the Ivy Bridge PRM (3.2.1.4 3DSTATE_PUSH_CONSTANT_ALLOC_VS):
    *
    *     Programming Restriction:
    *
    *         The 3DSTATE_CONSTANT_VS must be reprogrammed prior to the next
    *         3DPRIMITIVE command after programming the
    *         3DSTATE_PUSH_CONSTANT_ALLOC_VS.
    *
    * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
    * commands.
    */
   brw->ctx.NewDriverState |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
}

void
gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
                              unsigned hs_size, unsigned ds_size,
                              unsigned gs_size, unsigned fs_size)
{
   unsigned offset = 0;
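
   /* Each pair of dwords below programs one stage's push constant region as
    * the (offset, size) values computed by the caller, in kB.  Continuing the
    * illustrative Haswell GT3 split from gen7_allocate_push_constants, those
    * would be (0, 6), (6, 6), (12, 6), (18, 6) and (24, 8) for VS/HS/DS/GS/PS.
    */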
   BEGIN_BATCH(10);
   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_VS << 16 | (2 - 2));
   OUT_BATCH(vs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += vs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_HS << 16 | (2 - 2));
   OUT_BATCH(hs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += hs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_DS << 16 | (2 - 2));
   OUT_BATCH(ds_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += ds_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_GS << 16 | (2 - 2));
   OUT_BATCH(gs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += gs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_PS << 16 | (2 - 2));
   OUT_BATCH(fs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   ADVANCE_BATCH();

   /* From p292 of the Ivy Bridge PRM (11.2.4 3DSTATE_PUSH_CONSTANT_ALLOC_PS):
    *
    *     A PIPE_CONTROL command with the CS Stall bit set must be programmed
    *     in the ring after this instruction.
    *
    * No such restriction exists for Haswell or Baytrail.
    */
   if (brw->gen < 8 && !brw->is_haswell && !brw->is_baytrail)
      gen7_emit_cs_stall_flush(brw);
}

const struct brw_tracked_state gen7_push_constant_space = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = gen7_allocate_push_constants,
};

static void
upload_urb(struct brw_context *brw)
{
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vue_prog_data *vs_vue_prog_data =
      brw_vue_prog_data(brw->vs.base.prog_data);
   const unsigned vs_size = MAX2(vs_vue_prog_data->urb_entry_size, 1);
   /* BRW_NEW_GS_PROG_DATA */
   const bool gs_present = brw->gs.base.prog_data;
   /* BRW_NEW_TES_PROG_DATA */
   const bool tess_present = brw->tes.base.prog_data;

   gen7_upload_urb(brw, vs_size, gs_present, tess_present);
}

void
gen7_upload_urb(struct brw_context *brw, unsigned vs_size,
                bool gs_present, bool tess_present)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const int push_size_kB =
      (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 32 : 16;

   const bool active[4] = { true, tess_present, tess_present, gs_present };

   /* BRW_NEW_{VS,TCS,TES,GS}_PROG_DATA */
   struct brw_vue_prog_data *prog_data[4] = {
      [MESA_SHADER_VERTEX] =
         brw_vue_prog_data(brw->vs.base.prog_data),
      [MESA_SHADER_TESS_CTRL] =
         tess_present ? brw_vue_prog_data(brw->tcs.base.prog_data) : NULL,
      [MESA_SHADER_TESS_EVAL] =
         tess_present ? brw_vue_prog_data(brw->tes.base.prog_data) : NULL,
      [MESA_SHADER_GEOMETRY] =
         gs_present ? brw_vue_prog_data(brw->gs.base.prog_data) : NULL,
   };

   unsigned entry_size[4];
   entry_size[MESA_SHADER_VERTEX] = vs_size;
   for (int i = MESA_SHADER_TESS_CTRL; i <= MESA_SHADER_GEOMETRY; i++) {
      entry_size[i] = prog_data[i] ? prog_data[i]->urb_entry_size : 1;
   }

   /* If we're just switching between programs with the same URB requirements,
    * skip the rest of the logic.
    */
   if (!(brw->ctx.NewDriverState & BRW_NEW_CONTEXT) &&
       !(brw->ctx.NewDriverState & BRW_NEW_URB_SIZE) &&
       brw->urb.vsize == entry_size[MESA_SHADER_VERTEX] &&
       brw->urb.gs_present == gs_present &&
       brw->urb.gsize == entry_size[MESA_SHADER_GEOMETRY] &&
       brw->urb.tess_present == tess_present &&
       brw->urb.hsize == entry_size[MESA_SHADER_TESS_CTRL] &&
       brw->urb.dsize == entry_size[MESA_SHADER_TESS_EVAL]) {
      return;
   }
   brw->urb.vsize = entry_size[MESA_SHADER_VERTEX];
   brw->urb.gs_present = gs_present;
   brw->urb.gsize = entry_size[MESA_SHADER_GEOMETRY];
   brw->urb.tess_present = tess_present;
   brw->urb.hsize = entry_size[MESA_SHADER_TESS_CTRL];
   brw->urb.dsize = entry_size[MESA_SHADER_TESS_EVAL];

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks.
    * BRW_NEW_URB_SIZE
    */
   unsigned urb_chunks = brw->urb.size * 1024 / chunk_size_bytes;

   /* Reserve space for push constants */
   unsigned push_constant_bytes = 1024 * push_size_kB;
   unsigned push_constant_chunks = push_constant_bytes / chunk_size_bytes;
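
   /* For example, an Ivybridge GT2 (256kB of URB space, per the comment at
    * the top of this file) has urb_chunks = 256 * 1024 / 8192 = 32, and a
    * 16kB push constant region reserves 2 of those chunks, leaving 30 chunks
    * for URB entries.
    */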

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for HS, DS and GS.
    */
   unsigned granularity[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      granularity[i] = (entry_size[i] < 9) ? 8 : 1;
   }
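
   /* Since entry sizes are counted in 512-bit (64-byte) units, this means any
    * stage whose entries are smaller than 9 * 64 = 576 bytes must be
    * programmed with a multiple of 8 entries.
    */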

   unsigned min_entries[4] = {
      /* VS has a lower limit on the number of URB entries.
       *
       * From the Broadwell PRM, 3DSTATE_URB_VS instruction:
       * "When tessellation is enabled, the VS Number of URB Entries must be
       *  greater than or equal to 192."
       */
      [MESA_SHADER_VERTEX] = tess_present && brw->gen == 8 ?
         192 : devinfo->urb.min_entries[MESA_SHADER_VERTEX],

      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate fewer entries than the GS granularity
       * requirement computed above (the ALIGN loop below rounds the
       * minimum up accordingly).
       */
      [MESA_SHADER_GEOMETRY] = gs_present ? 2 : 0,

      [MESA_SHADER_TESS_CTRL] = tess_present ? 1 : 0,

      [MESA_SHADER_TESS_EVAL] = tess_present ?
         devinfo->urb.min_entries[MESA_SHADER_TESS_EVAL] : 0,
   };

   /* Min VS Entries isn't a multiple of 8 on Cherryview/Broxton; round up.
    * Round them all up.
    */
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      min_entries[i] = ALIGN(min_entries[i], granularity[i]);
   }

   unsigned entry_size_bytes[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      entry_size_bytes[i] = 64 * entry_size[i];
   }

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */
   unsigned chunks[4];
   unsigned wants[4];
   unsigned total_needs = push_constant_chunks;
   unsigned total_wants = 0;

   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      if (active[i]) {
         chunks[i] = DIV_ROUND_UP(min_entries[i] * entry_size_bytes[i],
                                  chunk_size_bytes);

         wants[i] =
            DIV_ROUND_UP(devinfo->urb.max_entries[i] * entry_size_bytes[i],
                         chunk_size_bytes) - chunks[i];
      } else {
         chunks[i] = 0;
         wants[i] = 0;
      }

      total_needs += chunks[i];
      total_wants += wants[i];
   }
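
   /* As a made-up example: a VS with a 4 * 64 = 256-byte entry and a minimum
    * of 32 entries needs DIV_ROUND_UP(32 * 256, 8192) = 1 chunk; if the
    * hardware maximum were 640 entries, it would "want"
    * DIV_ROUND_UP(640 * 256, 8192) - 1 = 19 additional chunks.
    */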

   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned remaining_space = MIN2(urb_chunks - total_needs, total_wants);

   if (remaining_space > 0) {
      for (int i = MESA_SHADER_VERTEX;
           total_wants > 0 && i <= MESA_SHADER_TESS_EVAL; i++) {
         unsigned additional = (unsigned)
            roundf(wants[i] * (((float) remaining_space) / total_wants));
         chunks[i] += additional;
         remaining_space -= additional;
         total_wants -= additional;
      }

      chunks[MESA_SHADER_GEOMETRY] += remaining_space;
   }
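
   /* A small illustration with made-up numbers: if 8 chunks remain and
    * wants[] is { VS: 10, HS: 0, DS: 0, GS: 6 } (total_wants = 16), the VS
    * receives roundf(10 * 8 / 16) = 5 extra chunks, the HS and DS receive
    * none, and the GS picks up the remaining 3 chunks in the final step.
    */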

   /* Sanity check that we haven't over-allocated. */
   unsigned total_chunks = push_constant_chunks;
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      total_chunks += chunks[i];
   }
   assert(total_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned entries[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      entries[i] = chunks[i] * chunk_size_bytes / entry_size_bytes[i];

      /* Since we rounded up when computing wants[], this may be slightly
       * more than the maximum allowed amount, so correct for that.
       */
      entries[i] = MIN2(entries[i], devinfo->urb.max_entries[i]);

      /* Ensure that we program a multiple of the granularity. */
      entries[i] = ROUND_DOWN_TO(entries[i], granularity[i]);

      /* Finally, sanity check to make sure we have at least the minimum
       * number of entries needed for each stage.
       */
      assert(entries[i] >= min_entries[i]);
   }

   /* Lay out the URB in pipeline order: push constants, VS, HS, DS, GS. */
   unsigned start[4];
   start[0] = push_constant_chunks;
   for (int i = MESA_SHADER_TESS_CTRL; i <= MESA_SHADER_GEOMETRY; i++) {
      start[i] = start[i - 1] + chunks[i - 1];
   }
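
   /* For instance, with 2 push constant chunks and hypothetical chunk counts
    * of { VS: 8, HS: 2, DS: 4, GS: 2 }, the stages would start at chunk
    * offsets { VS: 2, HS: 10, DS: 12, GS: 16 }.
    */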

   if (brw->gen == 7 && !brw->is_haswell && !brw->is_baytrail)
      gen7_emit_vs_workaround_flush(brw);

   BEGIN_BATCH(8);
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      OUT_BATCH((_3DSTATE_URB_VS + i) << 16 | (2 - 2));
      OUT_BATCH(entries[i] |
                ((entry_size[i] - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
                (start[i] << GEN7_URB_STARTING_ADDRESS_SHIFT));
   }
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen7_urb = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_URB_SIZE |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = upload_urb,
};