[mesa.git] src/mesa/drivers/dri/i965/gen7_urb.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "intel_batchbuffer.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

/**
 * The following diagram shows how we partition the URB:
 *
 *        16kB or 32kB               Rest of the URB space
 *   __________-__________   _________________-_________________
 *  /                     \ /                                   \
 * +-------------------------------------------------------------+
 * |     VS/FS/GS Push      |              VS/GS URB             |
 * |       Constants        |               Entries              |
 * +-------------------------------------------------------------+
 *
 * Notably, push constants must be stored at the beginning of the URB
 * space, while entries can be stored anywhere.  Ivybridge and Haswell
 * GT1/GT2 have a maximum constant buffer size of 16kB, while Haswell GT3
 * doubles this (32kB).
 *
 * Ivybridge and Haswell GT1/GT2 allow push constants to be located (and
 * sized) in increments of 1kB.  Haswell GT3 requires them to be located and
 * sized in increments of 2kB.
 *
 * Currently we split the constant buffer space evenly among whatever stages
 * are active.  This is probably not ideal, but simple.
 *
 * Ivybridge GT1 and Haswell GT1 have 128kB of URB space.
 * Ivybridge GT2 and Haswell GT2 have 256kB of URB space.
 * Haswell GT3 has 512kB of URB space.
 *
 * See "Volume 2a: 3D Pipeline," section 1.8, "Volume 1b: Configurations",
 * and the documentation for 3DSTATE_PUSH_CONSTANT_ALLOC_xS.
 */
static void
gen7_allocate_push_constants(struct brw_context *brw)
{
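   /* avail_size is in units of multiplier kB: 16 such units cover the 16kB
    * push constant area (or 32kB on Haswell GT3).  The multiplier converts
    * the per-stage sizes back to kB when they're passed on below.
    */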
   unsigned avail_size = 16;
   unsigned multiplier = (brw->is_haswell && brw->gt == 3) ? 2 : 1;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   bool gs_present = brw->geometry_program;

   unsigned vs_size, gs_size;
   if (gs_present) {
      vs_size = avail_size / 3;
      avail_size -= vs_size;
      gs_size = avail_size / 2;
      avail_size -= gs_size;
   } else {
      vs_size = avail_size / 2;
      avail_size -= vs_size;
      gs_size = 0;
   }
   unsigned fs_size = avail_size;
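   /* For example, with a GS present the 16 units split 5/5/6 between the
    * VS, GS, and FS; without one, the VS and FS get 8 units each.
    */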

   gen7_emit_push_constant_state(brw, multiplier * vs_size,
                                 multiplier * gs_size, multiplier * fs_size);

   /* From p115 of the Ivy Bridge PRM (3.2.1.4 3DSTATE_PUSH_CONSTANT_ALLOC_VS):
    *
    *     Programming Restriction:
    *
    *         The 3DSTATE_CONSTANT_VS must be reprogrammed prior to the next
    *         3DPRIMITIVE command after programming the
    *         3DSTATE_PUSH_CONSTANT_ALLOC_VS.
    *
    * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
    * commands.
    */
   brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
}

void
gen7_emit_push_constant_state(struct brw_context *brw, unsigned vs_size,
                              unsigned gs_size, unsigned fs_size)
{
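   /* Pack the stages back-to-back from the start of the push constant
    * area; each 3DSTATE_PUSH_CONSTANT_ALLOC_xS packet programs a size and
    * a starting offset, both in kB.
    */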
   unsigned offset = 0;

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_VS << 16 | (2 - 2));
   OUT_BATCH(vs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += vs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_GS << 16 | (2 - 2));
   OUT_BATCH(gs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   offset += gs_size;

   OUT_BATCH(_3DSTATE_PUSH_CONSTANT_ALLOC_PS << 16 | (2 - 2));
   OUT_BATCH(fs_size | offset << GEN7_PUSH_CONSTANT_BUFFER_OFFSET_SHIFT);
   ADVANCE_BATCH();

   /* From p292 of the Ivy Bridge PRM (11.2.4 3DSTATE_PUSH_CONSTANT_ALLOC_PS):
    *
    *     A PIPE_CONTROL command with the CS Stall bit set must be programmed
    *     in the ring after this instruction.
    *
    * No such restriction exists for Haswell.
    */
   if (!brw->is_haswell) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
      /* From p61 of the Ivy Bridge PRM (1.10.4 PIPE_CONTROL Command: DW1[20]
       * CS Stall):
       *
       *     One of the following must also be set:
       *     - Render Target Cache Flush Enable ([12] of DW1)
       *     - Depth Cache Flush Enable ([0] of DW1)
       *     - Stall at Pixel Scoreboard ([1] of DW1)
       *     - Depth Stall ([13] of DW1)
       *     - Post-Sync Operation ([15:14] of DW1)
       *
       * We choose to do a Post-Sync Operation (Write Immediate Data), since
       * it seems like it will incur the least additional performance penalty.
       */
      OUT_BATCH(PIPE_CONTROL_CS_STALL | PIPE_CONTROL_WRITE_IMMEDIATE);
      OUT_RELOC(brw->batch.workaround_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state gen7_push_constant_space = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT | BRW_NEW_GEOMETRY_PROGRAM,
      .cache = 0,
   },
   .emit = gen7_allocate_push_constants,
};

static void
gen7_upload_urb(struct brw_context *brw)
{
   const int push_size_kB = brw->is_haswell && brw->gt == 3 ? 32 : 16;

   /* CACHE_NEW_VS_PROG */
   unsigned vs_size = MAX2(brw->vs.prog_data->base.urb_entry_size, 1);
   unsigned vs_entry_size_bytes = vs_size * 64;
   /* BRW_NEW_GEOMETRY_PROGRAM, CACHE_NEW_GS_PROG */
   bool gs_present = brw->geometry_program;
   unsigned gs_size = gs_present ? brw->gs.prog_data->base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;
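   /* The entry sizes above are counted in 512-bit (64-byte) URB rows, hence
    * the multiplication by 64 to get bytes.
    */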

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks. */
   unsigned urb_chunks = brw->urb.size * 1024 / chunk_size_bytes;
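   /* For example, Ivybridge GT2's 256kB URB works out to 32 chunks. */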

   /* Reserve space for push constants */
   unsigned push_constant_bytes = 1024 * push_size_kB;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;
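   /* That's 2 chunks for the usual 16kB, or 4 for Haswell GT3's 32kB. */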

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries */
   unsigned vs_chunks =
      ALIGN(brw->urb.min_vs_entries * vs_entry_size_bytes, chunk_size_bytes) /
      chunk_size_bytes;
   unsigned vs_wants =
      ALIGN(brw->urb.max_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes - vs_chunks;

   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       *     the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate fewer than gs_granularity entries.
       */
      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                        chunk_size_bytes) / chunk_size_bytes;
      gs_wants =
         ALIGN(brw->urb.max_gs_entries * gs_entry_size_bytes,
               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         round(vs_wants * (((double) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      gs_chunks += remaining_space;
   }
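   /* For example, if the VS wants 30 more chunks, the GS wants 10 more, and
    * only 20 remain, the VS receives round(30 * 20 / 40) = 15 of them and
    * the GS receives the other 5.
    */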

   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, brw->urb.max_vs_entries);
   nr_gs_entries = MIN2(nr_gs_entries, brw->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);

   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= brw->urb.min_vs_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);

   /* Gen7 doesn't actually use brw->urb.nr_{vs,gs}_entries, but it seems
    * better to put reasonable data in there rather than leave them
    * uninitialized.
    */
   brw->urb.nr_vs_entries = nr_vs_entries;
   brw->urb.nr_gs_entries = nr_gs_entries;

   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - GS
    */
   brw->urb.vs_start = push_constant_chunks;
   brw->urb.gs_start = push_constant_chunks + vs_chunks;
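   /* With the usual 16kB of push constants, VS entries start at chunk 2
    * (16kB into the URB) and GS entries immediately follow the VS ones.
    */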

   gen7_emit_vs_workaround_flush(brw);
   gen7_emit_urb_state(brw,
                       brw->urb.nr_vs_entries, vs_size, brw->urb.vs_start,
                       brw->urb.nr_gs_entries, gs_size, brw->urb.gs_start);
}

void
gen7_emit_urb_state(struct brw_context *brw,
                    unsigned nr_vs_entries, unsigned vs_size,
                    unsigned vs_start, unsigned nr_gs_entries,
                    unsigned gs_size, unsigned gs_start)
{
   BEGIN_BATCH(8);
   OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
   OUT_BATCH(nr_vs_entries |
             ((vs_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (vs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   OUT_BATCH(_3DSTATE_URB_GS << 16 | (2 - 2));
   OUT_BATCH(nr_gs_entries |
             ((gs_size - 1) << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (gs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   /* The HS and DS stages are unused, so allocate them zero URB space. */
   OUT_BATCH(_3DSTATE_URB_HS << 16 | (2 - 2));
   OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (vs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));

   OUT_BATCH(_3DSTATE_URB_DS << 16 | (2 - 2));
   OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
             (vs_start << GEN7_URB_STARTING_ADDRESS_SHIFT));
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen7_urb = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT | BRW_NEW_GEOMETRY_PROGRAM,
      .cache = (CACHE_NEW_VS_PROG | CACHE_NEW_GS_PROG),
   },
   .emit = gen7_upload_urb,
};