Merge remote-tracking branch 'origin/master' into vulkan
[mesa.git] / src / gallium / drivers / nouveau / nv50 / nv50_state_validate.c
1
2 #include "util/u_format.h"
3
4 #include "nv50/nv50_context.h"
5
/* Program render-target slot @i as a "null" RT: zeroed address/format
 * words plus a 64-wide, zero-height surface in RT_HORIZ/RT_VERT.
 * Used for unbound color attachments. */
static inline void
nv50_fb_set_null_rt(struct nouveau_pushbuf *push, unsigned i)
{
   unsigned w;

   BEGIN_NV04(push, NV50_3D(RT_ADDRESS_HIGH(i)), 4);
   for (w = 0; w < 4; ++w)
      PUSH_DATA(push, 0);
   BEGIN_NV04(push, NV50_3D(RT_HORIZ(i)), 2);
   PUSH_DATA(push, 64);
   PUSH_DATA(push, 0);
}
18
/* Emit the full framebuffer state to the 3D push buffer: every color
 * render target, the depth/stencil (zeta) surface, the multisample mode,
 * the screen scissor, and (on NVA3+ classes) the per-sample position
 * table in the AUX constant buffer. */
19 static void
20 nv50_validate_fb(struct nv50_context *nv50)
21 {
22 struct nouveau_pushbuf *push = nv50->base.pushbuf;
23 struct pipe_framebuffer_state *fb = &nv50->framebuffer;
24 unsigned i;
/* Default to single-sampled; overwritten below from the bound surfaces. */
25 unsigned ms_mode = NV50_3D_MULTISAMPLE_MODE_MS1;
/* array_size accumulates the minimum layer count over all RTs;
 * array_mode is switched to MODE_3D if any RT has 3D layout. */
26 uint32_t array_size = 0xffff, array_mode = 0;
27
/* Drop previous FB buffer references; bound surfaces are re-added below. */
28 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
29
/* 076543210 is an octal literal: one 3-bit field per RT slot, i.e. the
 * identity RT mapping, combined with the active color buffer count. */
30 BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
31 PUSH_DATA (push, (076543210 << 4) | fb->nr_cbufs);
32 BEGIN_NV04(push, NV50_3D(SCREEN_SCISSOR_HORIZ), 2);
33 PUSH_DATA (push, fb->width << 16);
34 PUSH_DATA (push, fb->height << 16);
35
36 for (i = 0; i < fb->nr_cbufs; ++i) {
37 struct nv50_miptree *mt;
38 struct nv50_surface *sf;
39 struct nouveau_bo *bo;
40
/* Unbound attachment slot: program it as a null RT and move on. */
41 if (!fb->cbufs[i]) {
42 nv50_fb_set_null_rt(push, i);
43 continue;
44 }
45
46 mt = nv50_miptree(fb->cbufs[i]->texture);
47 sf = nv50_surface(fb->cbufs[i]);
48 bo = mt->base.bo;
49
50 array_size = MIN2(array_size, sf->depth);
51 if (mt->layout_3d)
52 array_mode = NV50_3D_RT_ARRAY_MODE_MODE_3D; /* 1 << 16 */
53
54 /* can't mix 3D with ARRAY or have RTs of different depth/array_size */
55 assert(mt->layout_3d || !array_mode || array_size == 1);
56
/* Emit address (hi/lo), hardware RT format, then tile mode and layer
 * stride (the latter two depend on tiled vs. linear below). */
57 BEGIN_NV04(push, NV50_3D(RT_ADDRESS_HIGH(i)), 5);
58 PUSH_DATAh(push, mt->base.address + sf->offset);
59 PUSH_DATA (push, mt->base.address + sf->offset);
60 PUSH_DATA (push, nv50_format_table[sf->base.format].rt);
/* Tiled surface (has a VRAM memtype): real tile mode, layer stride in
 * units of 4 bytes, width/height in pixels, and the array mode. */
61 if (likely(nouveau_bo_memtype(bo))) {
62 assert(sf->base.texture->target != PIPE_BUFFER);
63
64 PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
65 PUSH_DATA (push, mt->layer_stride >> 2);
66 BEGIN_NV04(push, NV50_3D(RT_HORIZ(i)), 2);
67 PUSH_DATA (push, sf->width);
68 PUSH_DATA (push, sf->height);
69 BEGIN_NV04(push, NV50_3D(RT_ARRAY_MODE), 1);
70 PUSH_DATA (push, array_mode | array_size);
71 nv50->rt_array_mode = array_mode | array_size;
72 } else {
/* Linear surface: zero tile mode/layer stride, pitch goes into RT_HORIZ
 * with the LINEAR flag, and array mode is forced to 0. */
73 PUSH_DATA (push, 0);
74 PUSH_DATA (push, 0);
75 BEGIN_NV04(push, NV50_3D(RT_HORIZ(i)), 2);
76 PUSH_DATA (push, NV50_3D_RT_HORIZ_LINEAR | mt->level[0].pitch);
77 PUSH_DATA (push, sf->height);
78 BEGIN_NV04(push, NV50_3D(RT_ARRAY_MODE), 1);
79 PUSH_DATA (push, 0);
80
/* Linear RTs are not expected together with depth/stencil or MS. */
81 assert(!fb->zsbuf);
82 assert(!mt->ms_mode);
83 }
84
85 ms_mode = mt->ms_mode;
86
/* If the surface is still flagged as being read by the GPU, request a
 * serialization point (emitted later in nv50_state_validate). */
87 if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
88 nv50->state.rt_serialize = true;
89 mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
90 mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
91
92 /* only register for writing, otherwise we'd always serialize here */
93 BCTX_REFN(nv50->bufctx_3d, FB, &mt->base, WR);
94 }
95
96 if (fb->zsbuf) {
97 struct nv50_miptree *mt = nv50_miptree(fb->zsbuf->texture);
98 struct nv50_surface *sf = nv50_surface(fb->zsbuf);
/* "unk": set for 3D targets or single-layer surfaces; exact hardware
 * meaning of this ZETA_ARRAY_MODE bit is unclear (hence the name). */
99 int unk = mt->base.base.target == PIPE_TEXTURE_3D || sf->depth == 1;
100
101 BEGIN_NV04(push, NV50_3D(ZETA_ADDRESS_HIGH), 5);
102 PUSH_DATAh(push, mt->base.address + sf->offset);
103 PUSH_DATA (push, mt->base.address + sf->offset);
104 PUSH_DATA (push, nv50_format_table[fb->zsbuf->format].rt);
105 PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
106 PUSH_DATA (push, mt->layer_stride >> 2);
107 BEGIN_NV04(push, NV50_3D(ZETA_ENABLE), 1);
108 PUSH_DATA (push, 1);
109 BEGIN_NV04(push, NV50_3D(ZETA_HORIZ), 3);
110 PUSH_DATA (push, sf->width);
111 PUSH_DATA (push, sf->height);
112 PUSH_DATA (push, (unk << 16) | sf->depth);
113
114 ms_mode = mt->ms_mode;
115
/* Same read->write hazard tracking as for the color RTs above. */
116 if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
117 nv50->state.rt_serialize = true;
118 mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
119 mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
120
121 BCTX_REFN(nv50->bufctx_3d, FB, &mt->base, WR);
122 } else {
123 BEGIN_NV04(push, NV50_3D(ZETA_ENABLE), 1);
124 PUSH_DATA (push, 0);
125 }
126
127 BEGIN_NV04(push, NV50_3D(MULTISAMPLE_MODE), 1);
128 PUSH_DATA (push, ms_mode);
129
130 /* Only need to initialize the first viewport, which is used for clears */
131 BEGIN_NV04(push, NV50_3D(VIEWPORT_HORIZ(0)), 2);
132 PUSH_DATA (push, fb->width << 16);
133 PUSH_DATA (push, fb->height << 16);
134
/* NVA3+ only: upload the sample positions for the current sample count
 * (presumably 1 << ms_mode samples — TODO confirm the mode encoding)
 * into the AUX constant buffer for shader consumption. */
135 if (nv50->screen->tesla->oclass >= NVA3_3D_CLASS) {
136 unsigned ms = 1 << ms_mode;
137 BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
138 PUSH_DATA (push, (NV50_CB_AUX_SAMPLE_OFFSET << (8 - 2)) | NV50_CB_AUX);
139 BEGIN_NI04(push, NV50_3D(CB_DATA(0)), 2 * ms);
140 for (i = 0; i < ms; i++) {
141 float xy[2];
142 nv50->base.pipe.get_sample_position(&nv50->base.pipe, ms, i, xy);
143 PUSH_DATAf(push, xy[0]);
144 PUSH_DATAf(push, xy[1]);
145 }
146 }
147 }
148
149 static void
150 nv50_validate_blend_colour(struct nv50_context *nv50)
151 {
152 struct nouveau_pushbuf *push = nv50->base.pushbuf;
153
154 BEGIN_NV04(push, NV50_3D(BLEND_COLOR(0)), 4);
155 PUSH_DATAf(push, nv50->blend_colour.color[0]);
156 PUSH_DATAf(push, nv50->blend_colour.color[1]);
157 PUSH_DATAf(push, nv50->blend_colour.color[2]);
158 PUSH_DATAf(push, nv50->blend_colour.color[3]);
159 }
160
161 static void
162 nv50_validate_stencil_ref(struct nv50_context *nv50)
163 {
164 struct nouveau_pushbuf *push = nv50->base.pushbuf;
165
166 BEGIN_NV04(push, NV50_3D(STENCIL_FRONT_FUNC_REF), 1);
167 PUSH_DATA (push, nv50->stencil_ref.ref_value[0]);
168 BEGIN_NV04(push, NV50_3D(STENCIL_BACK_FUNC_REF), 1);
169 PUSH_DATA (push, nv50->stencil_ref.ref_value[1]);
170 }
171
172 static void
173 nv50_validate_stipple(struct nv50_context *nv50)
174 {
175 struct nouveau_pushbuf *push = nv50->base.pushbuf;
176 unsigned i;
177
178 BEGIN_NV04(push, NV50_3D(POLYGON_STIPPLE_PATTERN(0)), 32);
179 for (i = 0; i < 32; ++i)
180 PUSH_DATA(push, util_bswap32(nv50->stipple.stipple[i]));
181 }
182
/* Emit the per-viewport scissor rectangles.  With NV50_SCISSORS_CLIPPING
 * the scissor is also intersected with each viewport's bounds (derived
 * from translate/scale) so the hardware scissor doubles as viewport
 * clipping.
 * NOTE(review): the !NV50_SCISSORS_CLIPPING branch references the loop
 * index and `s`, which are only declared inside the #ifdef block — that
 * configuration looks bitrotted / non-compiling; verify before flipping
 * the define. */
183 static void
184 nv50_validate_scissor(struct nv50_context *nv50)
185 {
186 struct nouveau_pushbuf *push = nv50->base.pushbuf;
187 #ifdef NV50_SCISSORS_CLIPPING
188 int minx, maxx, miny, maxy, i;
189
/* Nothing to do if neither the relevant dirty bits nor the rasterizer's
 * scissor-enable flag changed. */
190 if (!(nv50->dirty &
191 (NV50_NEW_SCISSOR | NV50_NEW_VIEWPORT | NV50_NEW_FRAMEBUFFER)) &&
192 nv50->state.scissor == nv50->rast->pipe.scissor)
193 return;
194
/* Toggling scissor-enable invalidates every viewport's scissor. */
195 if (nv50->state.scissor != nv50->rast->pipe.scissor)
196 nv50->scissors_dirty = (1 << NV50_MAX_VIEWPORTS) - 1;
197
198 nv50->state.scissor = nv50->rast->pipe.scissor;
199
/* With scissoring disabled, the rects track the framebuffer size, so a
 * framebuffer change also dirties all of them. */
200 if ((nv50->dirty & NV50_NEW_FRAMEBUFFER) && !nv50->state.scissor)
201 nv50->scissors_dirty = (1 << NV50_MAX_VIEWPORTS) - 1;
202
203 for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
204 struct pipe_scissor_state *s = &nv50->scissors[i];
205 struct pipe_viewport_state *vp = &nv50->viewports[i];
206
207 if (!(nv50->scissors_dirty & (1 << i)) &&
208 !(nv50->viewports_dirty & (1 << i)))
209 continue;
210
/* Start from the app scissor, or the whole framebuffer if disabled. */
211 if (nv50->state.scissor) {
212 minx = s->minx;
213 maxx = s->maxx;
214 miny = s->miny;
215 maxy = s->maxy;
216 } else {
217 minx = 0;
218 maxx = nv50->framebuffer.width;
219 miny = 0;
220 maxy = nv50->framebuffer.height;
221 }
222
/* Intersect with the viewport rectangle implied by translate +/- |scale|. */
223 minx = MAX2(minx, (int)(vp->translate[0] - fabsf(vp->scale[0])));
224 maxx = MIN2(maxx, (int)(vp->translate[0] + fabsf(vp->scale[0])));
225 miny = MAX2(miny, (int)(vp->translate[1] - fabsf(vp->scale[1])));
226 maxy = MIN2(maxy, (int)(vp->translate[1] + fabsf(vp->scale[1])));
227
/* Clamp to [0, 8192] so the packed 16-bit fields below stay sane. */
228 minx = MIN2(minx, 8192);
229 maxx = MAX2(maxx, 0);
230 miny = MIN2(miny, 8192);
231 maxy = MAX2(maxy, 0);
232
233 BEGIN_NV04(push, NV50_3D(SCISSOR_HORIZ(i)), 2);
234 PUSH_DATA (push, (maxx << 16) | minx);
235 PUSH_DATA (push, (maxy << 16) | miny);
236 #else
237 BEGIN_NV04(push, NV50_3D(SCISSOR_HORIZ(i)), 2);
238 PUSH_DATA (push, (s->maxx << 16) | s->minx);
239 PUSH_DATA (push, (s->maxy << 16) | s->miny);
240 #endif
241 }
242
243 nv50->scissors_dirty = 0;
244 }
245
246 static void
247 nv50_validate_viewport(struct nv50_context *nv50)
248 {
249 struct nouveau_pushbuf *push = nv50->base.pushbuf;
250 float zmin, zmax;
251 int i;
252
253 for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
254 struct pipe_viewport_state *vpt = &nv50->viewports[i];
255
256 if (!(nv50->viewports_dirty & (1 << i)))
257 continue;
258
259 BEGIN_NV04(push, NV50_3D(VIEWPORT_TRANSLATE_X(i)), 3);
260 PUSH_DATAf(push, vpt->translate[0]);
261 PUSH_DATAf(push, vpt->translate[1]);
262 PUSH_DATAf(push, vpt->translate[2]);
263 BEGIN_NV04(push, NV50_3D(VIEWPORT_SCALE_X(i)), 3);
264 PUSH_DATAf(push, vpt->scale[0]);
265 PUSH_DATAf(push, vpt->scale[1]);
266 PUSH_DATAf(push, vpt->scale[2]);
267
268 zmin = vpt->translate[2] - fabsf(vpt->scale[2]);
269 zmax = vpt->translate[2] + fabsf(vpt->scale[2]);
270
271 #ifdef NV50_SCISSORS_CLIPPING
272 BEGIN_NV04(push, NV50_3D(DEPTH_RANGE_NEAR(i)), 2);
273 PUSH_DATAf(push, zmin);
274 PUSH_DATAf(push, zmax);
275 #endif
276 }
277
278 nv50->viewports_dirty = 0;
279 }
280
281 static inline void
282 nv50_check_program_ucps(struct nv50_context *nv50,
283 struct nv50_program *vp, uint8_t mask)
284 {
285 const unsigned n = util_logbase2(mask) + 1;
286
287 if (vp->vp.clpd_nr >= n)
288 return;
289 nv50_program_destroy(nv50, vp);
290
291 vp->vp.clpd_nr = n;
292 if (likely(vp == nv50->vertprog)) {
293 nv50->dirty |= NV50_NEW_VERTPROG;
294 nv50_vertprog_validate(nv50);
295 } else {
296 nv50->dirty |= NV50_NEW_GMTYPROG;
297 nv50_gmtyprog_validate(nv50);
298 }
299 nv50_fp_linkage_validate(nv50);
300 }
301
302 /* alpha test is disabled if there are no color RTs, so make sure we have at
303 * least one if alpha test is enabled. Note that this must run after
304 * nv50_validate_fb, otherwise that will override the RT count setting.
305 */
306 static void
307 nv50_validate_derived_2(struct nv50_context *nv50)
308 {
309 struct nouveau_pushbuf *push = nv50->base.pushbuf;
310
311 if (nv50->zsa && nv50->zsa->pipe.alpha.enabled &&
312 nv50->framebuffer.nr_cbufs == 0) {
313 nv50_fb_set_null_rt(push, 0);
314 BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
315 PUSH_DATA (push, (076543210 << 4) | 1);
316 }
317 }
318
319 static void
320 nv50_validate_derived_3(struct nv50_context *nv50)
321 {
322 struct nouveau_pushbuf *push = nv50->base.pushbuf;
323 struct pipe_framebuffer_state *fb = &nv50->framebuffer;
324 uint32_t ms = 0;
325
326 if ((!fb->nr_cbufs || !fb->cbufs[0] ||
327 !util_format_is_pure_integer(fb->cbufs[0]->format)) && nv50->blend) {
328 if (nv50->blend->pipe.alpha_to_coverage)
329 ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
330 if (nv50->blend->pipe.alpha_to_one)
331 ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;
332 }
333
334 BEGIN_NV04(push, NV50_3D(MULTISAMPLE_CTRL), 1);
335 PUSH_DATA (push, ms);
336 }
337
338 static void
339 nv50_validate_clip(struct nv50_context *nv50)
340 {
341 struct nouveau_pushbuf *push = nv50->base.pushbuf;
342 struct nv50_program *vp;
343 uint8_t clip_enable;
344
345 if (nv50->dirty & NV50_NEW_CLIP) {
346 BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
347 PUSH_DATA (push, (NV50_CB_AUX_UCP_OFFSET << 8) | NV50_CB_AUX);
348 BEGIN_NI04(push, NV50_3D(CB_DATA(0)), PIPE_MAX_CLIP_PLANES * 4);
349 PUSH_DATAp(push, &nv50->clip.ucp[0][0], PIPE_MAX_CLIP_PLANES * 4);
350 }
351
352 vp = nv50->gmtyprog;
353 if (likely(!vp))
354 vp = nv50->vertprog;
355
356 clip_enable = nv50->rast->pipe.clip_plane_enable;
357
358 BEGIN_NV04(push, NV50_3D(CLIP_DISTANCE_ENABLE), 1);
359 PUSH_DATA (push, clip_enable);
360
361 if (clip_enable)
362 nv50_check_program_ucps(nv50, vp, clip_enable);
363 }
364
365 static void
366 nv50_validate_blend(struct nv50_context *nv50)
367 {
368 struct nouveau_pushbuf *push = nv50->base.pushbuf;
369
370 PUSH_SPACE(push, nv50->blend->size);
371 PUSH_DATAp(push, nv50->blend->state, nv50->blend->size);
372 }
373
374 static void
375 nv50_validate_zsa(struct nv50_context *nv50)
376 {
377 struct nouveau_pushbuf *push = nv50->base.pushbuf;
378
379 PUSH_SPACE(push, nv50->zsa->size);
380 PUSH_DATAp(push, nv50->zsa->state, nv50->zsa->size);
381 }
382
383 static void
384 nv50_validate_rasterizer(struct nv50_context *nv50)
385 {
386 struct nouveau_pushbuf *push = nv50->base.pushbuf;
387
388 PUSH_SPACE(push, nv50->rast->size);
389 PUSH_DATAp(push, nv50->rast->state, nv50->rast->size);
390 }
391
392 static void
393 nv50_validate_sample_mask(struct nv50_context *nv50)
394 {
395 struct nouveau_pushbuf *push = nv50->base.pushbuf;
396
397 unsigned mask[4] =
398 {
399 nv50->sample_mask & 0xffff,
400 nv50->sample_mask & 0xffff,
401 nv50->sample_mask & 0xffff,
402 nv50->sample_mask & 0xffff
403 };
404
405 BEGIN_NV04(push, NV50_3D(MSAA_MASK(0)), 4);
406 PUSH_DATA (push, mask[0]);
407 PUSH_DATA (push, mask[1]);
408 PUSH_DATA (push, mask[2]);
409 PUSH_DATA (push, mask[3]);
410 }
411
412 static void
413 nv50_validate_min_samples(struct nv50_context *nv50)
414 {
415 struct nouveau_pushbuf *push = nv50->base.pushbuf;
416 int samples;
417
418 if (nv50->screen->tesla->oclass < NVA3_3D_CLASS)
419 return;
420
421 samples = util_next_power_of_two(nv50->min_samples);
422 if (samples > 1)
423 samples |= NVA3_3D_SAMPLE_SHADING_ENABLE;
424
425 BEGIN_NV04(push, SUBC_3D(NVA3_3D_SAMPLE_SHADING), 1);
426 PUSH_DATA (push, samples);
427 }
428
429 static void
430 nv50_switch_pipe_context(struct nv50_context *ctx_to)
431 {
432 struct nv50_context *ctx_from = ctx_to->screen->cur_ctx;
433
434 if (ctx_from)
435 ctx_to->state = ctx_from->state;
436 else
437 ctx_to->state = ctx_to->screen->save_state;
438
439 ctx_to->dirty = ~0;
440 ctx_to->viewports_dirty = ~0;
441 ctx_to->scissors_dirty = ~0;
442
443 ctx_to->constbuf_dirty[0] =
444 ctx_to->constbuf_dirty[1] =
445 ctx_to->constbuf_dirty[2] = (1 << NV50_MAX_PIPE_CONSTBUFS) - 1;
446
447 if (!ctx_to->vertex)
448 ctx_to->dirty &= ~(NV50_NEW_VERTEX | NV50_NEW_ARRAYS);
449
450 if (!ctx_to->vertprog)
451 ctx_to->dirty &= ~NV50_NEW_VERTPROG;
452 if (!ctx_to->fragprog)
453 ctx_to->dirty &= ~NV50_NEW_FRAGPROG;
454
455 if (!ctx_to->blend)
456 ctx_to->dirty &= ~NV50_NEW_BLEND;
457 if (!ctx_to->rast)
458 #ifdef NV50_SCISSORS_CLIPPING
459 ctx_to->dirty &= ~(NV50_NEW_RASTERIZER | NV50_NEW_SCISSOR);
460 #else
461 ctx_to->dirty &= ~NV50_NEW_RASTERIZER;
462 #endif
463 if (!ctx_to->zsa)
464 ctx_to->dirty &= ~NV50_NEW_ZSA;
465
466 ctx_to->screen->cur_ctx = ctx_to;
467 }
468
469 static struct state_validate {
470 void (*func)(struct nv50_context *);
471 uint32_t states;
472 } validate_list[] = {
473 { nv50_validate_fb, NV50_NEW_FRAMEBUFFER },
474 { nv50_validate_blend, NV50_NEW_BLEND },
475 { nv50_validate_zsa, NV50_NEW_ZSA },
476 { nv50_validate_sample_mask, NV50_NEW_SAMPLE_MASK },
477 { nv50_validate_rasterizer, NV50_NEW_RASTERIZER },
478 { nv50_validate_blend_colour, NV50_NEW_BLEND_COLOUR },
479 { nv50_validate_stencil_ref, NV50_NEW_STENCIL_REF },
480 { nv50_validate_stipple, NV50_NEW_STIPPLE },
481 #ifdef NV50_SCISSORS_CLIPPING
482 { nv50_validate_scissor, NV50_NEW_SCISSOR | NV50_NEW_VIEWPORT |
483 NV50_NEW_RASTERIZER |
484 NV50_NEW_FRAMEBUFFER },
485 #else
486 { nv50_validate_scissor, NV50_NEW_SCISSOR },
487 #endif
488 { nv50_validate_viewport, NV50_NEW_VIEWPORT },
489 { nv50_vertprog_validate, NV50_NEW_VERTPROG },
490 { nv50_gmtyprog_validate, NV50_NEW_GMTYPROG },
491 { nv50_fragprog_validate, NV50_NEW_FRAGPROG | NV50_NEW_RASTERIZER |
492 NV50_NEW_MIN_SAMPLES },
493 { nv50_fp_linkage_validate, NV50_NEW_FRAGPROG | NV50_NEW_VERTPROG |
494 NV50_NEW_GMTYPROG | NV50_NEW_RASTERIZER },
495 { nv50_gp_linkage_validate, NV50_NEW_GMTYPROG | NV50_NEW_VERTPROG },
496 { nv50_validate_derived_rs, NV50_NEW_FRAGPROG | NV50_NEW_RASTERIZER |
497 NV50_NEW_VERTPROG | NV50_NEW_GMTYPROG },
498 { nv50_validate_derived_2, NV50_NEW_ZSA | NV50_NEW_FRAMEBUFFER },
499 { nv50_validate_derived_3, NV50_NEW_BLEND | NV50_NEW_FRAMEBUFFER },
500 { nv50_validate_clip, NV50_NEW_CLIP | NV50_NEW_RASTERIZER |
501 NV50_NEW_VERTPROG | NV50_NEW_GMTYPROG },
502 { nv50_constbufs_validate, NV50_NEW_CONSTBUF },
503 { nv50_validate_textures, NV50_NEW_TEXTURES },
504 { nv50_validate_samplers, NV50_NEW_SAMPLERS },
505 { nv50_stream_output_validate, NV50_NEW_STRMOUT |
506 NV50_NEW_VERTPROG | NV50_NEW_GMTYPROG },
507 { nv50_vertex_arrays_validate, NV50_NEW_VERTEX | NV50_NEW_ARRAYS },
508 { nv50_validate_min_samples, NV50_NEW_MIN_SAMPLES },
509 };
510
511 bool
512 nv50_state_validate(struct nv50_context *nv50, uint32_t mask)
513 {
514 uint32_t state_mask;
515 int ret;
516 unsigned i;
517
518 if (nv50->screen->cur_ctx != nv50)
519 nv50_switch_pipe_context(nv50);
520
521 state_mask = nv50->dirty & mask;
522
523 if (state_mask) {
524 for (i = 0; i < ARRAY_SIZE(validate_list); ++i) {
525 struct state_validate *validate = &validate_list[i];
526
527 if (state_mask & validate->states)
528 validate->func(nv50);
529 }
530 nv50->dirty &= ~state_mask;
531
532 if (nv50->state.rt_serialize) {
533 nv50->state.rt_serialize = false;
534 BEGIN_NV04(nv50->base.pushbuf, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
535 PUSH_DATA (nv50->base.pushbuf, 0);
536 }
537
538 nv50_bufctx_fence(nv50->bufctx_3d, false);
539 }
540 nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx_3d);
541 ret = nouveau_pushbuf_validate(nv50->base.pushbuf);
542
543 if (unlikely(nv50->state.flushed)) {
544 nv50->state.flushed = false;
545 nv50_bufctx_fence(nv50->bufctx_3d, true);
546 }
547 return !ret;
548 }