nouveau: use bool instead of boolean
[mesa.git] src/gallium/drivers/nouveau/nv50/nv50_shader_state.c
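For context, this commit is a mechanical substitution of gallium's old boolean typedef with C99 bool. A minimal illustrative hunk follows; it is not the verbatim diff, and the removed lines assume the pre-change boolean/TRUE spellings:

-static boolean
+static bool
 nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
 ...
-               nv50->state.uniform_buffer_bound[s] = TRUE;
+               nv50->state.uniform_buffer_bound[s] = true;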
/*
 * Copyright 2008 Ben Skeggs
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "nv50/nv50_context.h"

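/* Re-validate constant buffer bindings for the vertex, geometry and fragment
 * stages. User constant buffers (slot 0 only) are uploaded inline through
 * CB_ADDR/CB_DATA; buffer-backed slots are bound by GPU address via
 * CB_DEF_ADDRESS_HIGH and enabled with SET_PROGRAM_CB.
 */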
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            while (words) {
               unsigned nr;

               if (!PUSH_SPACE(push, 16))
                  break;
               nr = PUSH_AVAIL(push);
               assert(nr >= 16);
               nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
            } else {
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }
}

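/* Translate the program on first use and upload its code to the code
 * segment; returns false if translation or upload fails.
 */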
static bool
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
{
   if (!prog->translated) {
      prog->translated = nv50_program_translate(
         prog, nv50->screen->base.device->chipset);
      if (!prog->translated)
         return false;
   } else
   if (prog->mem)
      return true;

   return nv50_program_upload_code(nv50, prog);
}

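/* Track which shader stages need thread-local storage and keep the screen's
 * TLS buffer referenced in the 3D buffer context while any bound stage
 * requires it.
 */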
static INLINE void
nv50_program_update_context_state(struct nv50_context *nv50,
                                  struct nv50_program *prog, int stage)
{
   const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;

   if (prog && prog->tls_space) {
      if (nv50->state.new_tls_space)
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
      if (!nv50->state.tls_required || nv50->state.new_tls_space)
         BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
      nv50->state.new_tls_space = false;
      nv50->state.tls_required |= 1 << stage;
   } else {
      if (nv50->state.tls_required == (1 << stage))
         nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
      nv50->state.tls_required &= ~(1 << stage);
   }
}

void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}

void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;

   fp->fp.sample_interp = nv50->min_samples > 1;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}

void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}

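/* Program POINT_COORD_REPLACE_MAP so that, when point sprites are enabled,
 * the selected generic FP inputs are replaced by point coordinates.
 */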
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}

/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   if (nv50->dirty & NV50_NEW_FRAGPROG)
      return;
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}

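/* Map the components of one FP input vector onto the matching VP/GP output
 * slots, marking linearly (non-perspective) interpolated components in 'lin'.
 * Returns the next free index in 'map'.
 */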
static int
nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
              struct nv50_varying *in, struct nv50_varying *out)
{
   int c;
   uint8_t mv = out->mask, mf = in->mask, oid = out->hw;

   for (c = 0; c < 4; ++c) {
      if (mf & 1) {
         if (in->linear)
            lin[mid / 32] |= 1 << (mid % 32);
         if (mv & 1)
            map[mid] = oid;
         else
         if (c == 3)
            map[mid] |= 1;
         ++mid;
      }

      oid += mv & 1;
      mf >>= 1;
      mv >>= 1;
   }

   return mid;
}

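/* Build the VP/GP result map consumed by the FP: position, clip distances,
 * colors and generic varyings, plus the SEMANTIC_* registers, interpolation
 * control and, if stream output is active, the STRMOUT_MAP.
 */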
void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   if (!(nv50->dirty & (NV50_NEW_VERTPROG |
                        NV50_NEW_FRAGPROG |
                        NV50_NEW_GMTYPROG))) {
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    * or is it the first byte ?
    */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   for (c = 0; c < vp->vp.clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m) {
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4;
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (vp->vp.clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}

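/* Build the VP output -> GP input mapping for VP_RESULT_MAP: matched
 * components take the VP output's hw slot, components the VP does not write
 * are pointed at the default slots (0x41 for w, 0x40 otherwise).
 */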
static int
nv50_vp_gp_mapping(uint8_t *map, int m,
                   struct nv50_program *vp, struct nv50_program *gp)
{
   int i, j, c;

   for (i = 0; i < gp->in_nr; ++i) {
      uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;

      for (j = 0; j < vp->out_nr; ++j) {
         if (vp->out[j].sn == gp->in[i].sn &&
             vp->out[j].si == gp->in[i].si) {
            mv = vp->out[j].mask;
            oid = vp->out[j].hw;
            break;
         }
      }

      for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
         if (mg & mv & 1)
            map[m++] = oid;
         else
         if (mg & 1)
            map[m++] = (c == 3) ? 0x41 : 0x40;
         oid += mv & 1;
      }
   }
   if (!m)
      map[m++] = 0;
   return m;
}

void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   n = (m + 3) / 4;

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}

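/* Bind the current stream output (transform feedback) targets: disable TFB,
 * set up the STRMOUT buffer addresses and attribute counts, and restore
 * buffer offsets from queries on >= NVA0 or compute a primitive limit on
 * original NV50.
 */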
void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_SO);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      if (n == 4 && !targ->clean)
         nv84_query_fifo_wait(push, targ->pq);
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);

         BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
         if (!targ->clean) {
            assert(targ->pq);
            nv50_query_pushbuf_submit(push, targ->pq, 0x4);
         } else {
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
         const unsigned limit = targ->pipe.buffer_size /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
      }
      BCTX_REFN(nv50->bufctx_3d, SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}