nv50: handle gl_Layer writes in GP
[mesa.git] / src / gallium / drivers / nouveau / nv50 / nv50_shader_state.c
1 /*
2 * Copyright 2008 Ben Skeggs
3 * Copyright 2010 Christoph Bumiller
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "pipe/p_context.h"
25 #include "pipe/p_defines.h"
26 #include "pipe/p_state.h"
27 #include "util/u_inlines.h"
28
29 #include "nv50/nv50_context.h"
30
/* Re-bind all constant buffers whose dirty bit is set, for the VP, GP and FP
 * stages.  User (CPU-side) buffers are streamed inline through CB_DATA into a
 * per-stage ring slot; GPU resources are bound directly via CB_DEF_ADDRESS.
 */
void
nv50_constbufs_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   unsigned s;

   for (s = 0; s < 3; ++s) {
      unsigned p;

      /* Translate the pipe shader stage into the SET_PROGRAM_CB stage field. */
      if (s == PIPE_SHADER_FRAGMENT)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_FRAGMENT;
      else
      if (s == PIPE_SHADER_GEOMETRY)
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_GEOMETRY;
      else
         p = NV50_3D_SET_PROGRAM_CB_PROGRAM_VERTEX;

      /* Process dirty slots lowest-bit first until none remain. */
      while (nv50->constbuf_dirty[s]) {
         const unsigned i = (unsigned)ffs(nv50->constbuf_dirty[s]) - 1;

         assert(i < NV50_MAX_PIPE_CONSTBUFS);
         nv50->constbuf_dirty[s] &= ~(1 << i);

         if (nv50->constbuf[s][i].user) {
            /* User buffer: copy the data inline into buffer NV50_CB_PVP + s. */
            const unsigned b = NV50_CB_PVP + s;
            unsigned start = 0;
            unsigned words = nv50->constbuf[s][0].size / 4;
            if (i) {
               NOUVEAU_ERR("user constbufs only supported in slot 0\n");
               continue;
            }
            /* Bind the stage's uniform buffer once; later updates only need
             * to rewrite the data.
             */
            if (!nv50->state.uniform_buffer_bound[s]) {
               nv50->state.uniform_buffer_bound[s] = TRUE;
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
            }
            /* Stream the words in chunks limited by available pushbuf space
             * and the maximum method packet length.
             */
            while (words) {
               unsigned nr;

               if (!PUSH_SPACE(push, 16))
                  break;
               nr = PUSH_AVAIL(push);
               assert(nr >= 16);
               /* Reserve 3 words for the CB_ADDR/CB_DATA method headers. */
               nr = MIN2(MIN2(nr - 3, words), NV04_PFIFO_MAX_PACKET_LEN);

               BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
               PUSH_DATA (push, (start << 8) | b);
               BEGIN_NI04(push, NV50_3D(CB_DATA(0)), nr);
               PUSH_DATAp(push, &nv50->constbuf[s][0].u.data[start * 4], nr);

               start += nr;
               words -= nr;
            }
         } else {
            struct nv04_resource *res =
               nv04_resource(nv50->constbuf[s][i].u.buf);
            if (res) {
               /* TODO: allocate persistent bindings */
               const unsigned b = s * 16 + i;

               assert(nouveau_resource_mapped_by_gpu(&res->base));

               /* Point hardware buffer b at the resource and enable it. */
               BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
               PUSH_DATAh(push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, res->address + nv50->constbuf[s][i].offset);
               PUSH_DATA (push, (b << 16) |
                          (nv50->constbuf[s][i].size & 0xffff));
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);

               BCTX_REFN(nv50->bufctx_3d, CB(s, i), res, RD);
            } else {
               /* No buffer: disable the binding (valid bit cleared). */
               BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
               PUSH_DATA (push, (i << 8) | p | 0);
            }
            /* A resource in slot 0 replaces the inline user buffer binding. */
            if (i == 0)
               nv50->state.uniform_buffer_bound[s] = FALSE;
         }
      }
   }
}
112
113 static boolean
114 nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
115 {
116 if (!prog->translated) {
117 prog->translated = nv50_program_translate(
118 prog, nv50->screen->base.device->chipset);
119 if (!prog->translated)
120 return FALSE;
121 } else
122 if (prog->mem)
123 return TRUE;
124
125 return nv50_program_upload_code(nv50, prog);
126 }
127
128 static INLINE void
129 nv50_program_update_context_state(struct nv50_context *nv50,
130 struct nv50_program *prog, int stage)
131 {
132 const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR;
133
134 if (prog && prog->tls_space) {
135 if (nv50->state.new_tls_space)
136 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
137 if (!nv50->state.tls_required || nv50->state.new_tls_space)
138 BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
139 nv50->state.new_tls_space = FALSE;
140 nv50->state.tls_required |= 1 << stage;
141 } else {
142 if (nv50->state.tls_required == (1 << stage))
143 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
144 nv50->state.tls_required &= ~(1 << stage);
145 }
146 }
147
/* Validate/upload the bound vertex program and emit its attribute enable
 * masks, register allocation and code entry point.
 */
void
nv50_vertprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->vertprog;

   if (!nv50_program_validate(nv50, vp))
      return;
   nv50_program_update_context_state(nv50, vp, 0); /* stage 0 = vertex */

   BEGIN_NV04(push, NV50_3D(VP_ATTR_EN(0)), 2);
   PUSH_DATA (push, vp->vp.attrs[0]);
   PUSH_DATA (push, vp->vp.attrs[1]);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_RESULT), 1);
   PUSH_DATA (push, vp->max_out);
   BEGIN_NV04(push, NV50_3D(VP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, vp->max_gpr);
   BEGIN_NV04(push, NV50_3D(VP_START_ID), 1);
   PUSH_DATA (push, vp->code_base);
}
168
/* Validate/upload the bound fragment program and emit its register
 * allocation, result count, control flags and code entry point.
 */
void
nv50_fragprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *fp = nv50->fragprog;

   if (!nv50_program_validate(nv50, fp))
      return;
   nv50_program_update_context_state(nv50, fp, 1); /* stage 1 = fragment */

   BEGIN_NV04(push, NV50_3D(FP_REG_ALLOC_TEMP), 1);
   PUSH_DATA (push, fp->max_gpr);
   BEGIN_NV04(push, NV50_3D(FP_RESULT_COUNT), 1);
   PUSH_DATA (push, fp->max_out);
   BEGIN_NV04(push, NV50_3D(FP_CONTROL), 1);
   PUSH_DATA (push, fp->fp.flags[0]);
   BEGIN_NV04(push, NV50_3D(FP_CTRL_UNK196C), 1);
   PUSH_DATA (push, fp->fp.flags[1]);
   BEGIN_NV04(push, NV50_3D(FP_START_ID), 1);
   PUSH_DATA (push, fp->code_base);
}
190
/* Validate the (optional) geometry program and emit its hardware state.
 * The TLS context update runs even with no GP bound so that stage 2's
 * requirement bit gets cleared when the GP is removed.
 */
void
nv50_gmtyprog_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *gp = nv50->gmtyprog;

   if (gp) {
      if (!nv50_program_validate(nv50, gp))
         return;
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_TEMP), 1);
      PUSH_DATA (push, gp->max_gpr);
      BEGIN_NV04(push, NV50_3D(GP_REG_ALLOC_RESULT), 1);
      PUSH_DATA (push, gp->max_out);
      BEGIN_NV04(push, NV50_3D(GP_OUTPUT_PRIMITIVE_TYPE), 1);
      PUSH_DATA (push, gp->gp.prim_type);
      BEGIN_NV04(push, NV50_3D(GP_VERTEX_OUTPUT_COUNT), 1);
      PUSH_DATA (push, gp->gp.vert_count);
      BEGIN_NV04(push, NV50_3D(GP_START_ID), 1);
      PUSH_DATA (push, gp->code_base);

      /* prim_size is consumed by stream-output primitive-limit math. */
      nv50->state.prim_size = gp->gp.prim_type; /* enum matches vertex count */
   }
   nv50_program_update_context_state(nv50, gp, 2); /* stage 2 = geometry */

   /* GP_ENABLE is updated in linkage validation */
}
217
/* Program POINT_COORD_REPLACE_MAP so that FP generic inputs selected by
 * sprite_coord_enable are replaced with point sprite coordinates.  Each map
 * slot is a 4-bit nibble: 0 = no replace, c+1 = replace with texcoord
 * component c.
 */
static void
nv50_sprite_coords_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t pntc[8], mode;
   struct nv50_program *fp = nv50->fragprog;
   unsigned i, c;
   /* Map slot where 'normal' FP inputs start; this byte of interpolant_ctrl
    * is set up in nv50_fp_linkage_validate (interp |= m << 8).
    */
   unsigned m = (nv50->state.interpolant_ctrl >> 8) & 0xff;

   if (!nv50->rast->pipe.point_quad_rasterization) {
      /* Sprite rasterization off: clear the replace map once if it was on. */
      if (nv50->state.point_sprite) {
         BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
         for (i = 0; i < 8; ++i)
            PUSH_DATA(push, 0);

         nv50->state.point_sprite = FALSE;
      }
      return;
   } else {
      nv50->state.point_sprite = TRUE;
   }

   memset(pntc, 0, sizeof(pntc));

   for (i = 0; i < fp->in_nr; i++) {
      unsigned n = util_bitcount(fp->in[i].mask);

      /* Skip (but account for the slots of) inputs that are not generics
       * or whose semantic index is not selected for replacement.
       */
      if (fp->in[i].sn != TGSI_SEMANTIC_GENERIC) {
         m += n;
         continue;
      }
      if (!(nv50->rast->pipe.sprite_coord_enable & (1 << fp->in[i].si))) {
         m += n;
         continue;
      }

      /* Write a replace nibble for each enabled component of this input. */
      for (c = 0; c < 4; ++c) {
         if (fp->in[i].mask & (1 << c)) {
            pntc[m / 8] |= (c + 1) << ((m % 8) * 4);
            ++m;
         }
      }
   }

   if (nv50->rast->pipe.sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT)
      mode = 0x00;
   else
      mode = 0x10;

   BEGIN_NV04(push, NV50_3D(POINT_SPRITE_CTRL), 1);
   PUSH_DATA (push, mode);

   BEGIN_NV04(push, NV50_3D(POINT_COORD_REPLACE_MAP(0)), 8);
   PUSH_DATAp(push, pntc, 8);
}
273
/* Validate state derived from shaders and the rasterizer cso. */
void
nv50_validate_derived_rs(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   uint32_t color, psize;

   nv50_sprite_coords_validate(nv50);

   /* Only emit RASTERIZE_ENABLE when the discard setting actually changed. */
   if (nv50->state.rasterizer_discard != nv50->rast->pipe.rasterizer_discard) {
      nv50->state.rasterizer_discard = nv50->rast->pipe.rasterizer_discard;
      BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
      PUSH_DATA (push, !nv50->rast->pipe.rasterizer_discard);
   }

   /* If the FP changed, nv50_fp_linkage_validate will rebuild and emit the
    * SEMANTIC_COLOR/PTSZ state itself; don't emit stale values here.
    */
   if (nv50->dirty & NV50_NEW_FRAGPROG)
      return;
   psize = nv50->state.semantic_psize & ~NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;
   color = nv50->state.semantic_color & ~NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (nv50->rast->pipe.clamp_vertex_color)
      color |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (color != nv50->state.semantic_color) {
      nv50->state.semantic_color = color;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 1);
      PUSH_DATA (push, color);
   }

   if (nv50->rast->pipe.point_size_per_vertex)
      psize |= NV50_3D_SEMANTIC_PTSZ_PTSZ_EN__MASK;

   if (psize != nv50->state.semantic_psize) {
      nv50->state.semantic_psize = psize;
      BEGIN_NV04(push, NV50_3D(SEMANTIC_PTSZ), 1);
      PUSH_DATA (push, psize);
   }
}
312
313 static int
314 nv50_vec4_map(uint8_t *map, int mid, uint32_t lin[4],
315 struct nv50_varying *in, struct nv50_varying *out)
316 {
317 int c;
318 uint8_t mv = out->mask, mf = in->mask, oid = out->hw;
319
320 for (c = 0; c < 4; ++c) {
321 if (mf & 1) {
322 if (in->linear)
323 lin[mid / 32] |= 1 << (mid % 32);
324 if (mv & 1)
325 map[mid] = oid;
326 else
327 if (c == 3)
328 map[mid] |= 1;
329 ++mid;
330 }
331
332 oid += mv & 1;
333 mf >>= 1;
334 mv >>= 1;
335 }
336
337 return mid;
338 }
339
/* Build and emit the result map that routes VP/GP outputs to FP inputs,
 * along with the derived SEMANTIC_*, interpolation and stream-out mappings.
 * With a GP bound, the GP acts as the stage feeding the FP.
 */
void
nv50_fp_linkage_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_program *vp = nv50->gmtyprog ? nv50->gmtyprog : nv50->vertprog;
   struct nv50_program *fp = nv50->fragprog;
   struct nv50_varying dummy;
   int i, n, c, m;
   uint32_t primid = 0;
   uint32_t layerid = vp->gp.layerid;
   uint32_t psiz = 0x000;
   uint32_t interp = fp->fp.interp;
   uint32_t colors = fp->fp.colors;
   uint32_t lin[4];
   uint8_t map[64];
   uint8_t so_map[64];

   /* If no shader changed, the map is still valid; we only get here for a
    * rasterizer change, and then only a light_twoside toggle matters.
    * FFC0_ID != BFC0_ID in the current SEMANTIC_COLOR means two-sided
    * lighting was active when the map was last built.
    */
   if (!(nv50->dirty & (NV50_NEW_VERTPROG |
                        NV50_NEW_FRAGPROG |
                        NV50_NEW_GMTYPROG))) {
      uint8_t bfc, ffc;
      ffc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_FFC0_ID__MASK);
      bfc = (nv50->state.semantic_color & NV50_3D_SEMANTIC_COLOR_BFC0_ID__MASK)
         >> 8;
      if (nv50->rast->pipe.light_twoside == ((ffc == bfc) ? 0 : 1))
         return;
   }

   memset(lin, 0x00, sizeof(lin));

   /* XXX: in buggy-endian mode, is the first element of map (u32)0x000000xx
    * or is it the first byte ?
    */
   /* Default map entry: 0x80 when a GP feeds the FP, 0x40 otherwise. */
   memset(map, nv50->gmtyprog ? 0x80 : 0x40, sizeof(map));

   dummy.mask = 0xf; /* map all components of HPOS */
   dummy.linear = 0;
   m = nv50_vec4_map(map, 0, lin, &dummy, &vp->out[0]);

   /* Clip distances follow directly after position. */
   for (c = 0; c < vp->vp.clpd_nr; ++c)
      map[m++] = vp->vp.clpd[c / 4] + (c % 4);

   colors |= m << 8; /* adjust BFC0 id */

   dummy.mask = 0x0;

   /* if light_twoside is active, FFC0_ID == BFC0_ID is invalid */
   if (nv50->rast->pipe.light_twoside) {
      for (i = 0; i < 2; ++i) {
         n = vp->vp.bfc[i];
         if (fp->vp.bfc[i] >= fp->in_nr)
            continue;
         m = nv50_vec4_map(map, m, lin, &fp->in[fp->vp.bfc[i]],
                           (n < vp->out_nr) ? &vp->out[n] : &dummy);
      }
   }
   colors += m - 4; /* adjust FFC0 id */
   interp |= m << 8; /* set map id where 'normal' FP inputs start */

   /* Match each FP input to the VP output with the same semantic; unmatched
    * inputs map against "dummy" (mask 0) and keep the default entry.
    */
   for (i = 0; i < fp->in_nr; ++i) {
      for (n = 0; n < vp->out_nr; ++n)
         if (vp->out[n].sn == fp->in[i].sn &&
             vp->out[n].si == fp->in[i].si)
            break;
      m = nv50_vec4_map(map, m, lin,
                        &fp->in[i], (n < vp->out_nr) ? &vp->out[n] : &dummy);
   }

   /* PrimitiveID either is replaced by the system value, or
    * written by the geometry shader into an output register
    */
   if (fp->gp.primid < 0x80) {
      primid = m;
      map[m++] = vp->gp.primid;
   }

   if (vp->gp.has_layer) {
      // In GL4.x, layer can be an fp input, but not in 3.x. Make sure to add
      // it to the output map.
      map[m++] = layerid;
   }

   if (nv50->rast->pipe.point_size_per_vertex) {
      psiz = (m << 4) | 1;
      map[m++] = vp->vp.psiz;
   }

   if (nv50->rast->pipe.clamp_vertex_color)
      colors |= NV50_3D_SEMANTIC_COLOR_CLMP_EN;

   if (unlikely(vp->so)) {
      /* Slot i in STRMOUT_MAP specifies the offset where slot i in RESULT_MAP
       * gets written.
       *
       * TODO:
       * Inverting vp->so->map (output -> offset) would probably speed this up.
       */
      memset(so_map, 0, sizeof(so_map));
      for (i = 0; i < vp->so->map_size; ++i) {
         if (vp->so->map[i] == 0xff)
            continue;
         /* Find (or append) the result-map slot holding this output. */
         for (c = 0; c < m; ++c)
            if (map[c] == vp->so->map[i] && !so_map[c])
               break;
         if (c == m) {
            c = m;
            map[m++] = vp->so->map[i];
         }
         so_map[c] = 0x80 | i;
      }
      /* Pad to a multiple of 4 entries for the packed upload below. */
      for (c = m; c & 3; ++c)
         so_map[c] = 0;
   }

   n = (m + 3) / 4; /* map entries are uploaded packed 4 per 32-bit word */
   assert(m <= 64);

   if (unlikely(nv50->gmtyprog)) {
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(GP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   } else {
      BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
      PUSH_DATA (push, vp->vp.attrs[2]);

      BEGIN_NV04(push, NV50_3D(SEMANTIC_PRIM_ID), 1);
      PUSH_DATA (push, primid);

      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
      PUSH_DATA (push, m);
      BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
      PUSH_DATAp(push, map, n);
   }

   BEGIN_NV04(push, NV50_3D(SEMANTIC_COLOR), 4);
   PUSH_DATA (push, colors);
   PUSH_DATA (push, (vp->vp.clpd_nr << 8) | 4);
   PUSH_DATA (push, layerid);
   PUSH_DATA (push, psiz);

   BEGIN_NV04(push, NV50_3D(LAYER), 1);
   PUSH_DATA (push, vp->gp.has_layer << 16);

   BEGIN_NV04(push, NV50_3D(FP_INTERPOLANT_CTRL), 1);
   PUSH_DATA (push, interp);

   /* Shadow the emitted values so later validation can diff against them. */
   nv50->state.interpolant_ctrl = interp;

   nv50->state.semantic_color = colors;
   nv50->state.semantic_psize = psiz;

   BEGIN_NV04(push, NV50_3D(NOPERSPECTIVE_BITMAP(0)), 4);
   PUSH_DATAp(push, lin, 4);

   BEGIN_NV04(push, NV50_3D(GP_ENABLE), 1);
   PUSH_DATA (push, nv50->gmtyprog ? 1 : 0);

   if (vp->so) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_MAP(0)), n);
      PUSH_DATAp(push, so_map, n);
   }
}
503
504 static int
505 nv50_vp_gp_mapping(uint8_t *map, int m,
506 struct nv50_program *vp, struct nv50_program *gp)
507 {
508 int i, j, c;
509
510 for (i = 0; i < gp->in_nr; ++i) {
511 uint8_t oid = 0, mv = 0, mg = gp->in[i].mask;
512
513 for (j = 0; j < vp->out_nr; ++j) {
514 if (vp->out[j].sn == gp->in[i].sn &&
515 vp->out[j].si == gp->in[i].si) {
516 mv = vp->out[j].mask;
517 oid = vp->out[j].hw;
518 break;
519 }
520 }
521
522 for (c = 0; c < 4; ++c, mv >>= 1, mg >>= 1) {
523 if (mg & mv & 1)
524 map[m++] = oid;
525 else
526 if (mg & 1)
527 map[m++] = (c == 3) ? 0x41 : 0x40;
528 oid += mv & 1;
529 }
530 }
531 return m;
532 }
533
534 void
535 nv50_gp_linkage_validate(struct nv50_context *nv50)
536 {
537 struct nouveau_pushbuf *push = nv50->base.pushbuf;
538 struct nv50_program *vp = nv50->vertprog;
539 struct nv50_program *gp = nv50->gmtyprog;
540 int m = 0;
541 int n;
542 uint8_t map[64];
543
544 if (!gp)
545 return;
546 memset(map, 0, sizeof(map));
547
548 m = nv50_vp_gp_mapping(map, m, vp, gp);
549
550 n = (m + 3) / 4;
551
552 BEGIN_NV04(push, NV50_3D(VP_GP_BUILTIN_ATTR_EN), 1);
553 PUSH_DATA (push, vp->vp.attrs[2] | gp->vp.attrs[2]);
554
555 BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP_SIZE), 1);
556 PUSH_DATA (push, m);
557 BEGIN_NV04(push, NV50_3D(VP_RESULT_MAP(0)), n);
558 PUSH_DATAp(push, map, n);
559 }
560
/* Set up transform feedback (stream output): bind the SO targets' buffers,
 * program buffer control and, depending on the 3D class, either per-buffer
 * offsets/sizes (>= NVA0) or a global primitive limit (< NVA0).
 */
void
nv50_stream_output_validate(struct nv50_context *nv50)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_stream_output_state *so;
   uint32_t ctrl;
   unsigned i;
   unsigned prims = ~0;

   /* The SO layout comes from the last vertex-processing stage. */
   so = nv50->gmtyprog ? nv50->gmtyprog->so : nv50->vertprog->so;

   /* Disable while reprogramming. */
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 0);
   if (!so || !nv50->num_so_targets) {
      if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
         BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
         PUSH_DATA (push, 0);
      }
      BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
      PUSH_DATA (push, 1);
      return;
   }

   /* previous TFB needs to complete */
   if (nv50->screen->base.class_3d < NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   ctrl = so->ctrl;
   if (nv50->screen->base.class_3d >= NVA0_3D_CLASS)
      ctrl |= NVA0_3D_STRMOUT_BUFFERS_CTRL_LIMIT_MODE_OFFSET;

   BEGIN_NV04(push, NV50_3D(STRMOUT_BUFFERS_CTRL), 1);
   PUSH_DATA (push, ctrl);

   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_SO);

   for (i = 0; i < nv50->num_so_targets; ++i) {
      struct nv50_so_target *targ = nv50_so_target(nv50->so_target[i]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      /* >= NVA0 also takes a buffer size word (4 data words vs 3). */
      const unsigned n = nv50->screen->base.class_3d >= NVA0_3D_CLASS ? 4 : 3;

      /* For a resumed (dirty) target, wait for the offset query result. */
      if (n == 4 && !targ->clean)
         nv84_query_fifo_wait(push, targ->pq);
      BEGIN_NV04(push, NV50_3D(STRMOUT_ADDRESS_HIGH(i)), n);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, so->num_attribs[i]);
      if (n == 4) {
         PUSH_DATA(push, targ->pipe.buffer_size);

         /* Start either at the previously recorded offset or at 0. */
         BEGIN_NV04(push, NVA0_3D(STRMOUT_OFFSET(i)), 1);
         if (!targ->clean) {
            assert(targ->pq);
            nv50_query_pushbuf_submit(push, targ->pq, 0x4);
         } else {
            PUSH_DATA(push, 0);
            targ->clean = FALSE;
         }
      } else {
         /* < NVA0 has no per-buffer size: derive a primitive count limit
          * from the smallest buffer instead.
          */
         const unsigned limit = targ->pipe.buffer_size /
            (so->stride[i] * nv50->state.prim_size);
         prims = MIN2(prims, limit);
      }
      BCTX_REFN(nv50->bufctx_3d, SO, buf, WR);
   }
   if (prims != ~0) {
      BEGIN_NV04(push, NV50_3D(STRMOUT_PRIMITIVE_LIMIT), 1);
      PUSH_DATA (push, prims);
   }
   BEGIN_NV04(push, NV50_3D(STRMOUT_PARAMS_LATCH), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(STRMOUT_ENABLE), 1);
   PUSH_DATA (push, 1);
}