/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "intel_batchbuffer.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
/**
 * The following diagram shows how we partition the URB:
 *
 *        16kB or 32kB               Rest of the URB space
 *   __________-__________   _________________-_________________
 *  /                     \ /                                   \
 * +-------------------------------------------------------------+
 * |  VS/HS/DS/GS/FS Push  |           VS/HS/DS/GS URB           |
 * |       Constants       |              Entries                |
 * +-------------------------------------------------------------+
 *
 * Notably, push constants must be stored at the beginning of the URB
 * space, while entries can be stored anywhere.  Ivybridge and Haswell
 * GT1/GT2 have a maximum constant buffer size of 16kB, while Haswell GT3
 * doubles this (32kB).
 *
 * Ivybridge and Haswell GT1/GT2 allow push constants to be located (and
 * sized) in increments of 1kB.  Haswell GT3 requires them to be located and
 * sized in increments of 2kB.
 *
 * Currently we split the constant buffer space evenly among whatever stages
 * are active.  This is probably not ideal, but simple.
 *
 * Ivybridge GT1 and Haswell GT1 have 128kB of URB space.
 * Ivybridge GT2 and Haswell GT2 have 256kB of URB space.
 * Haswell GT3 has 512kB of URB space.
 *
 * See "Volume 2a: 3D Pipeline," section 1.8, "Volume 1b: Configurations",
 * and the documentation for 3DSTATE_PUSH_CONSTANT_ALLOC_xS.
 */
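/* Illustrative note (derived from the sizes above, not PRM text): on
 * Ivybridge GT2, for example, the 256kB URB minus the 16kB push constant
 * region leaves 240kB for VS/HS/DS/GS URB entries; on Haswell GT3 the 512kB
 * URB minus 32kB of push constants leaves 480kB.
 */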
static void
gen7_allocate_push_constants(struct brw_context *brw)
{
   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool gs_present = brw->geometry_program;

   /* BRW_NEW_TESS_PROGRAMS */
   bool tess_present = brw->tess_eval_program;

   unsigned avail_size = 16;
   unsigned multiplier =
      (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 2 : 1;

   int stages = 2 + gs_present + 2 * tess_present;

   /* Divide up the available space equally between stages.  Because we
    * round down (using floor division), there may be some left over
    * space.  We allocate that to the pixel shader stage.
    */
   unsigned size_per_stage = avail_size / stages;
   unsigned vs_size = size_per_stage;
   unsigned hs_size = tess_present ? size_per_stage : 0;
   unsigned ds_size = tess_present ? size_per_stage : 0;
   unsigned gs_size = gs_present ? size_per_stage : 0;
   unsigned fs_size = avail_size - size_per_stage * (stages - 1);
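   /* Worked example (hypothetical configuration, for illustration only):
    * with a geometry shader but no tessellation on Ivybridge GT2, stages = 3
    * and size_per_stage = 16 / 3 = 5, so vs_size = 5, gs_size = 5 and
    * fs_size = 16 - 5 * 2 = 6; the fragment shader absorbs the 1kB that
    * floor division left over.
    */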
   gen7_emit_push_constant_state(brw, multiplier * vs_size,
                                 multiplier * hs_size, multiplier * ds_size,
                                 multiplier * gs_size, multiplier * fs_size);

   /* From p115 of the Ivy Bridge PRM (3.2.1.4 3DSTATE_PUSH_CONSTANT_ALLOC_VS):
    *
    *     Programming Restriction:
    *
    *     The 3DSTATE_CONSTANT_VS must be reprogrammed prior to the next
    *     3DPRIMITIVE command after programming the
    *     3DSTATE_PUSH_CONSTANT_ALLOC_VS.
    *
    * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
    * commands.
    */
   brw->ctx.NewDriverState |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
}
void
gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
                              unsigned hs_size, unsigned ds_size,
                              unsigned gs_size, unsigned fs_size)
{
   unsigned offset = 0;

   BEGIN_BATCH(10);
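   /* Note (added for clarity): the sizes passed in are already scaled by the
    * hardware's allocation granularity in gen7_allocate_push_constants(), and
    * offset simply accumulates them, so each stage's constant buffer starts
    * where the previous one ended and the five regions tile the push
    * constant area back to back.
    */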
   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_VS << 16 | (2 - 2));
   OUT_BATCH(vs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += vs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_HS << 16 | (2 - 2));
   OUT_BATCH(hs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += hs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_DS << 16 | (2 - 2));
   OUT_BATCH(ds_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += ds_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_GS << 16 | (2 - 2));
   OUT_BATCH(gs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += gs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_PS << 16 | (2 - 2));
   OUT_BATCH(fs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   ADVANCE_BATCH();

   /* From p292 of the Ivy Bridge PRM (11.2.4 3DSTATE_PUSH_CONSTANT_ALLOC_PS):
    *
    *     A PIPE_CONTROL command with the CS Stall bit set must be programmed
    *     in the ring after this instruction.
    *
    * No such restriction exists for Haswell or Baytrail.
    */
   if (brw->gen < 8 && !brw->is_haswell && !brw->is_baytrail)
      gen7_emit_cs_stall_flush(brw);
}

const struct brw_tracked_state gen7_push_constant_space = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_TESS_PROGRAMS,
   },
   .emit = gen7_allocate_push_constants,
};
static void
gen7_emit_urb_state(struct brw_context *brw,
                    unsigned nr_vs_entries,
                    unsigned vs_size, unsigned vs_start,
                    unsigned nr_hs_entries,
                    unsigned hs_size, unsigned hs_start,
                    unsigned nr_ds_entries,
                    unsigned ds_size, unsigned ds_start,
                    unsigned nr_gs_entries,
                    unsigned gs_size, unsigned gs_start)
{
   BEGIN_BATCH(8);
   OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
   OUT_BATCH(nr_vs_entries |
             ((vs_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (vs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   OUT_BATCH(_3DSTATE_URB_GS << 16 | (2 - 2));
   OUT_BATCH(nr_gs_entries |
             ((gs_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (gs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   OUT_BATCH(_3DSTATE_URB_HS << 16 | (2 - 2));
   OUT_BATCH(nr_hs_entries |
             ((hs_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (hs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   OUT_BATCH(_3DSTATE_URB_DS << 16 | (2 - 2));
   OUT_BATCH(nr_ds_entries |
             ((ds_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (ds_start << GEN7_URB_STARTING_ADDRESS_SHIFT));
   ADVANCE_BATCH();
}
static void
upload_urb(struct brw_context *brw)
{
   /* BRW_NEW_VS_PROG_DATA */
   const unsigned vs_size = MAX2(brw->vs.prog_data->base.urb_entry_size, 1);
   /* BRW_NEW_GEOMETRY_PROGRAM, BRW_NEW_GS_PROG_DATA */
   const bool gs_present = brw->geometry_program;
   /* BRW_NEW_TESS_PROGRAMS */
   const bool tess_present = brw->tess_eval_program;

   gen7_upload_urb(brw, vs_size, gs_present, tess_present);
}

void
gen7_upload_urb(struct brw_context *brw, unsigned vs_size,
                bool gs_present, bool tess_present)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const int push_size_kB =
      (brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 32 : 16;
   /* BRW_NEW_VS_PROG_DATA */
   unsigned vs_entry_size_bytes = vs_size * 64;
   /* BRW_NEW_GEOMETRY_PROGRAM, BRW_NEW_GS_PROG_DATA */
   unsigned gs_size = gs_present ? brw->gs.prog_data->base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;

   /* BRW_NEW_TCS_PROG_DATA */
   unsigned hs_size = tess_present ? brw->tcs.prog_data->base.urb_entry_size : 1;
   unsigned hs_entry_size_bytes = hs_size * 64;
   /* BRW_NEW_TES_PROG_DATA */
   unsigned ds_size = tess_present ? brw->tes.prog_data->base.urb_entry_size : 1;
   unsigned ds_entry_size_bytes = ds_size * 64;
   /* If we're just switching between programs with the same URB requirements,
    * skip the rest of the logic.
    */
   if (!(brw->ctx.NewDriverState & BRW_NEW_CONTEXT) &&
       !(brw->ctx.NewDriverState & BRW_NEW_URB_SIZE) &&
       brw->urb.vsize == vs_size &&
       brw->urb.gs_present == gs_present &&
       brw->urb.gsize == gs_size &&
       brw->urb.tess_present == tess_present &&
       brw->urb.hsize == hs_size &&
       brw->urb.dsize == ds_size) {
      return;
   }
   brw->urb.vsize = vs_size;
   brw->urb.gs_present = gs_present;
   brw->urb.gsize = gs_size;
   brw->urb.tess_present = tess_present;
   brw->urb.hsize = hs_size;
   brw->urb.dsize = ds_size;
   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for HS, DS and GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned hs_granularity = (hs_size < 9) ? 8 : 1;
   unsigned ds_granularity = (ds_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
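   /* For illustration (hypothetical entry sizes): a VS entry size of 2
    * (128 bytes) gives vs_granularity = 8, so nr_vs_entries is later rounded
    * down to a multiple of 8; an entry size of 9 or more (576 bytes and up)
    * relaxes the granularity to 1.
    */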
   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks.
    * BRW_NEW_URB_SIZE
    */
   unsigned urb_chunks = brw->urb.size * 1024 / chunk_size_bytes;

   /* Reserve space for push constants */
   unsigned push_constant_bytes = 1024 * push_size_kB;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;
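   /* Illustration (example hardware, not exhaustive): an Ivybridge GT2 URB
    * of 256kB yields urb_chunks = 32, and its 16kB push constant region
    * occupies push_constant_chunks = 2, leaving 30 chunks for URB entries;
    * Haswell GT3 (512kB URB, 32kB push constants) yields 64 and 4.
    */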
   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries.
    *
    * From the Broadwell PRM, 3DSTATE_URB_VS instruction:
    * "When tessellation is enabled, the VS Number of URB Entries must be
    *  greater than or equal to 192."
    */
   unsigned vs_min_entries =
      tess_present && brw->gen == 8 ? 192 : devinfo->urb.min_vs_entries;
   /* Min VS Entries isn't a multiple of 8 on Cherryview/Broxton; round up */
   vs_min_entries = ALIGN(vs_min_entries, vs_granularity);

   unsigned vs_chunks =
      DIV_ROUND_UP(vs_min_entries * vs_entry_size_bytes, chunk_size_bytes);
   unsigned vs_wants =
      DIV_ROUND_UP(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
                   chunk_size_bytes) - vs_chunks;
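   /* Worked example (hypothetical limits, for illustration only): with
    * vs_entry_size_bytes = 128, vs_min_entries = 32 and a device maximum of
    * 512 VS entries, vs_chunks = DIV_ROUND_UP(32 * 128, 8192) = 1 and
    * vs_wants = DIV_ROUND_UP(512 * 128, 8192) - 1 = 7.
    */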
   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate less than nr_gs_entries_granularity.
       */
      gs_chunks = DIV_ROUND_UP(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                               chunk_size_bytes);
      gs_wants = DIV_ROUND_UP(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
                              chunk_size_bytes) - gs_chunks;
   }

   unsigned hs_chunks = 0;
   unsigned hs_wants = 0;
   unsigned ds_chunks = 0;
   unsigned ds_wants = 0;

   if (tess_present) {
      hs_chunks =
         DIV_ROUND_UP(hs_granularity * hs_entry_size_bytes,
                      chunk_size_bytes);
      hs_wants =
         DIV_ROUND_UP(devinfo->urb.max_tcs_entries * hs_entry_size_bytes,
                      chunk_size_bytes) - hs_chunks;

      ds_chunks =
         DIV_ROUND_UP(devinfo->urb.min_ds_entries * ds_entry_size_bytes,
                      chunk_size_bytes);
      ds_wants =
         DIV_ROUND_UP(devinfo->urb.max_ds_entries * ds_entry_size_bytes,
                      chunk_size_bytes) - ds_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks +
                          vs_chunks + hs_chunks + ds_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);
   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + hs_wants + ds_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         roundf(vs_wants * (((float) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      total_wants -= vs_wants;

      if (total_wants > 0) {
         unsigned hs_additional = (unsigned)
            round(hs_wants * (((double) remaining_space) / total_wants));
         hs_chunks += hs_additional;
         remaining_space -= hs_additional;
         total_wants -= hs_wants;
      }

      if (total_wants > 0) {
         unsigned ds_additional = (unsigned)
            round(ds_wants * (((double) remaining_space) / total_wants));
         ds_chunks += ds_additional;
         remaining_space -= ds_additional;
         total_wants -= ds_wants;
      }

      gs_chunks += remaining_space;
   }
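   /* Worked example of the proportional hand-out above (hypothetical
    * numbers, for illustration only): with remaining_space = 10 chunks and
    * wants of VS = 6, HS = 2, DS = 2, GS = 10 (total_wants = 20), the VS
    * gets roundf(6 * 10/20) = 3; then with 7 chunks left against 14
    * remaining wants, HS gets round(2 * 7/14) = 1, DS gets
    * round(2 * 6/12) = 1, and the GS absorbs the final 5 chunks,
    * distributing all 10.
    */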
   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks +
          vs_chunks + hs_chunks + ds_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_hs_entries = hs_chunks * chunk_size_bytes / hs_entry_size_bytes;
   unsigned nr_ds_entries = ds_chunks * chunk_size_bytes / ds_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
   nr_hs_entries = MIN2(nr_hs_entries, devinfo->urb.max_tcs_entries);
   nr_ds_entries = MIN2(nr_ds_entries, devinfo->urb.max_ds_entries);
   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_hs_entries = ROUND_DOWN_TO(nr_hs_entries, hs_granularity);
   nr_ds_entries = ROUND_DOWN_TO(nr_ds_entries, ds_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= vs_min_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);
   if (tess_present) {
      assert(nr_hs_entries >= 1);
      assert(nr_ds_entries >= devinfo->urb.min_ds_entries);
   }

   /* Gen7 doesn't actually use brw->urb.nr_{vs,gs}_entries, but it seems
    * better to put reasonable data in there rather than leave them
    * uninitialized.
    */
   brw->urb.nr_vs_entries = nr_vs_entries;
   brw->urb.nr_hs_entries = nr_hs_entries;
   brw->urb.nr_ds_entries = nr_ds_entries;
   brw->urb.nr_gs_entries = nr_gs_entries;
   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - HS
    * - DS
    * - GS
    */
   brw->urb.vs_start = push_constant_chunks;
   brw->urb.hs_start = push_constant_chunks + vs_chunks;
   brw->urb.ds_start = push_constant_chunks + vs_chunks + hs_chunks;
   brw->urb.gs_start = push_constant_chunks + vs_chunks + hs_chunks +
                       ds_chunks;
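   /* Example layout (hypothetical chunk counts, in 8kB chunk units): with
    * push_constant_chunks = 2, vs_chunks = 8, hs_chunks = 1 and
    * ds_chunks = 1, the starts come out as vs_start = 2, hs_start = 10,
    * ds_start = 11 and gs_start = 12, i.e. each region begins where the
    * previous one ends.
    */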
   if (brw->gen == 7 && !brw->is_haswell && !brw->is_baytrail)
      gen7_emit_vs_workaround_flush(brw);
   gen7_emit_urb_state(brw,
                       brw->urb.nr_vs_entries, vs_size, brw->urb.vs_start,
                       brw->urb.nr_hs_entries, hs_size, brw->urb.hs_start,
                       brw->urb.nr_ds_entries, ds_size, brw->urb.ds_start,
                       brw->urb.nr_gs_entries, gs_size, brw->urb.gs_start);
}
const struct brw_tracked_state gen7_urb = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT |
             BRW_NEW_URB_SIZE |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = upload_urb,
};