Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / gallium / drivers / nouveau / nv50 / nv50_shader_state.c
1 /*
2 * Copyright 2008 Ben Skeggs
3 * Copyright 2010 Christoph Bumiller
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "pipe/p_context.h"
25 #include "pipe/p_defines.h"
26 #include "pipe/p_state.h"
27 #include "util/u_inlines.h"
28
29 #include "nv50/nv50_context.h"
30 #include "nv50/nv50_query_hw.h"
31
/* Re-bind / re-upload the constant buffers whose dirty bits are set, for
 * the vertex, geometry and fragment shader stages.
 */
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      /* SET_PROGRAM_CB stage selector for this shader stage. */
      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      /* Handle one dirty slot per iteration, lowest index first. */
      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            /* User uniforms: copied inline through CB_ADDR/CB_DATA.
             * Only slot 0 supports this (checked just below). */
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            /* Stream the data in chunks of at most the max method length. */
            while (words) {
               unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

               PUSH_SPACE(push, nr + 3);
               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               /* Bind the buffer by GPU address, then attach it to the
                * stage's binding point. */
               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);

               nv50->cb_dirty = 1; /* Force cache flush for UBO. */
            } else {
               /* NULL buffer: unbind the slot (enable bit cleared). */
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }
}
110
111 static bool
112 nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
113 {
114 if (!prog->translated) {
115 prog->translated = nv50_program_translate(
116 prog, nv50->screen->base.device->chipset);
117 if (!prog->translated)
118 return false;
119 } else
120 if (prog->mem)
121 return true;
122
123 return nv50_program_upload_code(nv50, prog);
124 }
125
126 static inline void
127 nv50_program_update_context_state(struct nv50_context *nv50,
128 struct nv50_program *prog, int stage)
129 {
130 const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;
131
132 if (prog && prog->tls_space) {
133 if (nv50->state.new_tls_space)
134 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
135 if (!nv50->state.tls_required || nv50->state.new_tls_space)
136 BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
137 nv50->state.new_tls_space = false;
138 nv50->state.tls_required |= 1 << stage;
139 } else {
140 if (nv50->state.tls_required == (1 << stage))
141 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
142 nv50->state.tls_required &= ~(1 << stage);
143 }
144 }
145
/* Validate the vertex program: make its code resident and emit the
 * per-program state (attribute enables, register counts, entry point).
 */
void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}
166
/* Validate the fragment program and emit its hardware state.
 * Frees the uploaded code when force_persample_interp changed, so the
 * program is re-uploaded with the matching interpolation fixups.
 */
void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;
   struct pipe_rasterizer_state *rast = &nv50->rast->pipe;

   if (fp->fp.force_persample_interp != rast->force_persample_interp) {
      /* Force the program to be reuploaded, which will trigger interp fixups
       * to get applied
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.force_persample_interp = rast->force_persample_interp;
   }

   /* Code is resident and no relevant state is dirty: nothing to emit. */
   if (fp->mem && !(nv50->dirty & (NV50_NEW_FRAGPROG | NV50_NEW_MIN_SAMPLES)))
      return;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   /* NVA3+ can force per-sample shading and export a sample mask. */
   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}
213
/* Validate the geometry program (if bound) and emit its hardware state.
 * GP_ENABLE itself is toggled during linkage validation, not here.
 */
void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   /* Runs even with gp == NULL so the TLS tracking for stage 2 is cleared. */
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}
240
/* Program POINT_COORD_REPLACE_MAP so that, when point sprites are active,
 * the GENERIC FP inputs selected by sprite_coord_enable are replaced by the
 * point coordinate. The map packs one 4-bit entry per FP input component:
 * 0 = no replacement, c + 1 = replace with point coord component c.
 */
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   /* Index where 'normal' FP inputs start in the result map; stored in
    * bits 8..15 of the cached interpolant control word. */
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         /* Sprites were enabled before: clear the whole replacement map. */
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      /* Non-GENERIC inputs are never replaced, but still occupy map slots. */
      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            /* 8 nibbles per 32-bit word; value c+1 selects coord component. */
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}
296
/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   /* With a dirty FP, fp linkage validation rebuilds and emits the
    * SEMANTIC_* state anyway, so skip the incremental updates below. */
   if (nv50->dirty & NV50_NEW_FRAGPROG)
      return;
   /* Recompute the enable bits from the rasterizer cso, keeping the ids. */
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   /* Only emit on actual change. */
   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}
335
336 static int
337 nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
338 struct nv50_varying *in, struct nv50_varying *out)
339 {
340 int c;
341 uint8_t mv = out->mask, mf = in->mask, oid = out->hw;
342
343 for (c = 0; c < 4; ++c) {
344 if (mf & 1) {
345 if (in->linear)
346 lin[mid / 32] |= 1 << (mid % 32);
347 if (mv & 1)
348 map[mid] = oid;
349 else
350 if (c == 3)
351 map[mid] |= 1;
352 ++mid;
353 }
354
355 oid += mv & 1;
356 mf >>= 1;
357 mv >>= 1;
358 }
359
360 return mid;
361 }
362
/* Build the VP/GP -> FP varying result map and emit all linkage-derived
 * state: semantic color/psize/layer/viewport ids, interpolant control,
 * the noperspective bitmap, and the stream-output map.
 */
void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   /* The stage feeding the FP: the GP if bound, the VP otherwise. */
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   if (!(nv50->dirty & (NV50_NEW_VERTPROG |
                        NV50_NEW_FRAGPROG |
                        NV50_NEW_GMTYPROG))) {
      /* Programs unchanged: only redo linkage if the two-sided-lighting
       * setting no longer matches the FFC/BFC ids programmed last time. */
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    * or is it the first byte ?
    */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   /* Slot 0..3: position (HPOS) always comes first. */
   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   /* Clip distances immediately follow the position. */
   for (c = 0; c < vp->vp.clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   /* Map each remaining FP input to the matching VP/GP output, found by
    * semantic name/index; record slots of special semantics on the way. */
   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   /* Append layer/viewport outputs even if the FP does not read them. */
   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         /* Find the first result-map slot carrying this output that has
          * not been claimed by a stream-output slot yet. */
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m) {
            /* Output not in the result map at all: append it. */
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      /* Zero the padding up to the next 4-byte boundary. */
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4;
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (vp->vp.clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   /* Cache the derived values for the incremental path in
    * nv50_validate_derived_rs / nv50_sprite_coords_validate. */
   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}
539
540 static int
541 nv50_vp_gp_mapping(uint8_t *map, int m,
542 struct nv50_program *vp, struct nv50_program *gp)
543 {
544 int i, j, c;
545
546 for (i = 0; i < gp->in_nr; ++i) {
547 uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;
548
549 for (j = 0; j < vp->out_nr; ++j) {
550 if (vp->out[j].sn == gp->in[i].sn &&
551 vp->out[j].si == gp->in[i].si) {
552 mv = vp->out[j].mask;
553 oid = vp->out[j].hw;
554 break;
555 }
556 }
557
558 for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
559 if (mg & mv & 1)
560 map[m++] = oid;
561 else
562 if (mg & 1)
563 map[m++] = (c == 3) ? 0x41 : 0x40;
564 oid += mv & 1;
565 }
566 }
567 if (!m)
568 map[m++] = 0;
569 return m;
570 }
571
/* Emit the VP -> GP varying result map. No-op when no geometry program is
 * bound (the VP -> FP case is handled by nv50_fp_linkage_validate).
 */
void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   /* Number of 32-bit words needed to hold the m map bytes. */
   n = (m + 3) / 4;

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}
599
/* (Re)bind the transform feedback (stream output) targets: disable TFB,
 * program buffer addresses and sizes, and re-enable it. On NVA0+ the write
 * offset is restored from the target's query when resuming into a buffer.
 */
void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      /* Nothing to stream out: leave TFB disabled. */
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_SO);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      /* NVA0+ takes a 4th data word (buffer size) per target. */
      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      /* Resuming into a written buffer: wait for its offset query first. */
      if (n == 4 && !targ->clean)
         nv84_hw_query_fifo_wait(push, nv50_query(targ->pq));
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);
         if (!targ->clean) {
            /* Restore the write offset recorded by the target's query. */
            assert(targ->pq);
            nv50_hw_query_pushbuf_submit(push, NVA0_3D_STRMOUT_OFFSET(i),
                                         nv50_query(targ->pq), 0x4);
         } else {
            BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
         /* Pre-NVA0 limits by primitive count: derive it from the buffer
          * size, the stride and the current output primitive size. */
         const unsigned limit = targ->pipe.buffer_size /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
      }
      targ->stride = so->stride[i];
      BCTX_REFN(nv50->bufctx_3d, SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}