b3fece07c3f316df711c8025144ccfc60e376271
[mesa.git] / src / gallium / drivers / nouveau / nv50 / nv50_shader_state.c
1 /*
2 * Copyright 2008 Ben Skeggs
3 * Copyright 2010 Christoph Bumiller
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "pipe/p_context.h"
25 #include "pipe/p_defines.h"
26 #include "pipe/p_state.h"
27 #include "util/u_inlines.h"
28
29 #include "nv50/nv50_context.h"
30
/* Re-emit constant buffer bindings for all dirty slots of the three
 * hardware shader stages (vertex, geometry, fragment).
 *
 * User-supplied constbufs (slot 0 only) are streamed inline via CB_DATA
 * into a fixed per-stage buffer; buffer-backed constbufs are bound by
 * GPU address through CB_DEF_ADDRESS / SET_PROGRAM_CB.
 */
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      /* SET_PROGRAM_CB stage selector for this shader type. */
      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      /* Pop dirty slots one bit at a time (lowest first). */
      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            /* b indexes the per-stage inline uniform buffer (PVP/PGP/PFP). */
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            /* Slot 0 only (enforced just below), hence the [0] index. */
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            /* Bind the inline buffer once; later updates only stream data. */
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = true;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            /* Stream the user data in maximum-packet-sized chunks. */
            while (words) {
               unsigned nr = MIN2(words, NV04_PFIFO_MAX_PACKET_LEN);

               PUSH_SPACE(push, nr + 3);
               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               /* Define the buffer (address + size), then bind it. */
               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);

               nv50->cb_dirty = 1; /* Force cache flush for UBO. */
            } else {
               /* NULL buffer: unbind this slot (valid bit cleared). */
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            /* Slot 0 switching to a real buffer invalidates the inline
             * uniform-buffer binding tracked above.
             */
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = false;
         }
      }
   }
}
109
110 static bool
111 nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
112 {
113 if (!prog->translated) {
114 prog->translated = nv50_program_translate(
115 prog, nv50->screen->base.device->chipset);
116 if (!prog->translated)
117 return false;
118 } else
119 if (prog->mem)
120 return true;
121
122 return nv50_program_upload_code(nv50, prog);
123 }
124
125 static inline void
126 nv50_program_update_context_state(struct nv50_context *nv50,
127 struct nv50_program *prog, int stage)
128 {
129 const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;
130
131 if (prog && prog->tls_space) {
132 if (nv50->state.new_tls_space)
133 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
134 if (!nv50->state.tls_required || nv50->state.new_tls_space)
135 BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
136 nv50->state.new_tls_space = false;
137 nv50->state.tls_required |= 1 << stage;
138 } else {
139 if (nv50->state.tls_required == (1 << stage))
140 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
141 nv50->state.tls_required &= ~(1 << stage);
142 }
143 }
144
/* Upload (if needed) and bind the current vertex program: attribute
 * enables, register allocation and code entry point.
 */
void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   /* Stage 0 = vertex for TLS tracking. */
   nv50_program_update_context_state(nv50, vp, 0);

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}
165
/* Upload (if needed) and bind the current fragment program, including
 * the NVA3+ per-sample shading controls.
 */
void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;
   struct pipe_rasterizer_state *rast = &nv50->rast->pipe;

   if (fp->fp.force_persample_interp != rast->force_persample_interp) {
      /* Force the program to be reuploaded, which will trigger interp fixups
       * to get applied
       */
      if (fp->mem)
         nouveau_heap_free(&fp->mem);

      fp->fp.force_persample_interp = rast->force_persample_interp;
   }

   /* Already resident and nothing relevant changed: skip the re-bind. */
   if (fp->mem && !(nv50->dirty & (NV50_NEW_FRAGPROG | NV50_NEW_MIN_SAMPLES)))
      return;

   if (!nv50_program_validate(nv50, fp))
      return;
   /* Stage 1 = fragment for TLS tracking. */
   nv50_program_update_context_state(nv50, fp, 1);

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);

   /* NVA3+ exposes explicit per-sample shading / sample-mask export. */
   if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA3_3D_FP_MULTISAMPLE), 1);
      if (nv50->min_samples > 1 || fp->fp.has_samplemask)
         PUSH_DATA(push,
                   NVA3_3D_FP_MULTISAMPLE_FORCE_PER_SAMPLE |
                   (NVA3_3D_FP_MULTISAMPLE_EXPORT_SAMPLE_MASK *
                    fp->fp.has_samplemask));
      else
         PUSH_DATA(push, 0);
   }
}
212
/* Upload (if bound) the current geometry program and its primitive
 * output configuration; always refresh the TLS bookkeeping so a removed
 * GP drops its TLS requirement.
 */
void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   /* Stage 2 = geometry for TLS tracking; gp may be NULL here. */
   nv50_program_update_context_state(nv50, gp, 2);

   /* GP_ENABLE is updated in linkage validation */
}
239
/* Program POINT_COORD_REPLACE_MAP so that FP generic inputs selected by
 * sprite_coord_enable are replaced by point coordinates.  The map holds
 * one 4-bit entry per FP input interpolant (8 per word); m tracks the
 * current interpolant slot and starts past the slots reserved in the
 * interpolant control word.
 */
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      /* Sprites off: clear the replace map once, then remember that. */
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = false;
      }
      return;
   } else {
      nv50->state.point_sprite = true;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      /* Only GENERIC semantics can be sprite coords; skip over the
       * interpolant slots other inputs occupy.
       */
      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      /* Mark each enabled component: entry value is component index + 1. */
      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   /* 0x10 flips the T coordinate for upper-left origin. */
   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}
295
/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   /* A new fragment program means FP linkage validation will rebuild the
    * semantic words from scratch anyway.
    */
   if (nv50->dirty & NV50_NEW_FRAGPROG)
      return;
   /* Recompute only the rasterizer-controlled bits of the cached words. */
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}
334
335 static int
336 nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
337 struct nv50_varying *in, struct nv50_varying *out)
338 {
339 int c;
340 uint8_t mv = out->mask, mf = in->mask, oid = out->hw;
341
342 for (c = 0; c < 4; ++c) {
343 if (mf & 1) {
344 if (in->linear)
345 lin[mid / 32] |= 1 << (mid % 32);
346 if (mv & 1)
347 map[mid] = oid;
348 else
349 if (c == 3)
350 map[mid] |= 1;
351 ++mid;
352 }
353
354 oid += mv & 1;
355 mf >>= 1;
356 mv >>= 1;
357 }
358
359 return mid;
360 }
361
/* Build and upload the VP/GP -> FP varying mapping: the RESULT_MAP that
 * routes producer outputs into FP input interpolants, the SEMANTIC_*
 * words locating colors / point size / primid / layer / viewport, the
 * interpolant control word, the flat/linear interpolation bitmap, and
 * (when transform feedback is active) the STRMOUT_MAP.
 */
void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   /* The "producer" is the GP when present, otherwise the VP. */
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = 0;
   uint32_t viewportid = 0;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   /* If no program changed, the only thing that can invalidate the cached
    * linkage is the two-sided-lighting switch (FFC0 == BFC0 iff one-sided).
    */
   if (!(nv50->dirty & (NV50_NEW_VERTPROG |
                        NV50_NEW_FRAGPROG |
                        NV50_NEW_GMTYPROG))) {
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    * or is it the first byte ?
    */
   /* Default entry: 0x40 reads 0.0 (0x80 via GP). */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   /* Clip distances follow HPOS in the result map. */
   for (c = 0; c < vp->vp.clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   /* Map each FP input to the producer output with matching semantics,
    * recording the slot of the special system-value semantics.
    */
   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      switch (fp->in[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         primid = m;
         break;
      case TGSI_SEMANTIC_LAYER:
         layerid = m;
         break;
      case TGSI_SEMANTIC_VIEWPORT_INDEX:
         viewportid = m;
         break;
      }
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   /* Layer/viewport written by the producer but not consumed by the FP
    * still need a slot so the hardware can pick them up.
    */
   if (vp->gp.has_layer && !layerid) {
      layerid = m;
      map[m++] = vp->gp.layerid;
   }

   if (vp->gp.has_viewport && !viewportid) {
      viewportid = m;
      map[m++] = vp->gp.viewportid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1; /* slot id | PTSZ_EN */
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         /* Find an unclaimed result-map slot holding this output ... */
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         /* ... or append a new slot for it. */
         if (c == m) {
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      /* Pad to a multiple of 4 so the PUSH_DATAp word count covers it. */
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4; /* number of 32-bit words holding m byte entries */
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2] | fp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      assert(m > 0);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(GP_VIEWPORT_ID_ENABLE), 5);
   PUSH_DATA (push, vp->gp.has_viewport);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (vp->vp.clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(SEMANTIC_VIEWPORT), 1);
   PUSH_DATA (push, viewportid);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   /* Cache the derived words so nv50_validate_derived_rs() can diff them. */
   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}
538
539 static int
540 nv50_vp_gp_mapping(uint8_t *map, int m,
541 struct nv50_program *vp, struct nv50_program *gp)
542 {
543 int i, j, c;
544
545 for (i = 0; i < gp->in_nr; ++i) {
546 uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;
547
548 for (j = 0; j < vp->out_nr; ++j) {
549 if (vp->out[j].sn == gp->in[i].sn &&
550 vp->out[j].si == gp->in[i].si) {
551 mv = vp->out[j].mask;
552 oid = vp->out[j].hw;
553 break;
554 }
555 }
556
557 for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
558 if (mg & mv & 1)
559 map[m++] = oid;
560 else
561 if (mg & 1)
562 map[m++] = (c == 3) ? 0x41 : 0x40;
563 oid += mv & 1;
564 }
565 }
566 if (!m)
567 map[m++] = 0;
568 return m;
569 }
570
/* Upload the VP -> GP varying mapping (VP_RESULT_MAP) and the combined
 * built-in attribute enables.  No-op when no geometry program is bound.
 */
void
nv50_gp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;
   struct nv50_program *gp = nv50->gmtyprog;
   int m = 0;
   int n;
   uint8_t map[64];

   if (!gp)
      return;
   memset(map, 0, sizeof(map));

   m = nv50_vp_gp_mapping(map, m, vp, gp);

   n = (m + 3) / 4; /* 32-bit words needed for m byte-sized entries */

   BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
   PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);

   assert(m > 0);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
   PUSH_DATA (push, m);
   BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
   PUSH_DATAp(push, map, n);
}
598
/* (Re)configure transform feedback: disable stream-out, bind the current
 * target buffers with their per-class (NV50 vs NVA0+) offset handling,
 * set the primitive limit on pre-NVA0, and re-enable stream-out.
 */
void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   /* Stream-out layout comes from the last geometry stage. */
   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   /* Always disable first; reconfiguration happens with TFB off. */
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_SO);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      /* NVA0+ takes a 4th word (buffer size) in the address packet. */
      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      /* Resuming into a buffer with prior data: wait for its offset query. */
      if (n == 4 && !targ->clean)
         nv84_query_fifo_wait(push, targ->pq);
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);
         if (!targ->clean) {
            /* Restore the saved write offset from the query object. */
            assert(targ->pq);
            nv50_query_pushbuf_submit(push, NVA0_3D_STRMOUT_OFFSET(i),
                                      targ->pq, 0x4);
         } else {
            BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
            PUSH_DATA(push, 0);
            targ->clean = false;
         }
      } else {
         /* Pre-NVA0 limits by primitive count instead of byte offset. */
         const unsigned limit = targ->pipe.buffer_size /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
      }
      targ->stride = so->stride[i];
      BCTX_REFN(nv50->bufctx_3d, SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}