swr: Limit DEBUG workaround to LLVM < 7
[mesa.git] / src / gallium / drivers / swr / swr_state.cpp
1 /****************************************************************************
2 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ***************************************************************************/
23
24 #include <llvm/Config/llvm-config.h>
25
26 #if LLVM_VERSION_MAJOR < 7
27 // llvm redefines DEBUG
28 #pragma push_macro("DEBUG")
29 #undef DEBUG
30 #endif
31
32 #include <rasterizer/core/state.h>
33 #include "JitManager.h"
34
35 #if LLVM_VERSION_MAJOR < 7
36 #pragma pop_macro("DEBUG")
37 #endif
38
39 #include "common/os.h"
40 #include "jit_api.h"
41 #include "gen_state_llvm.h"
42 #include "core/multisample.h"
43 #include "core/state_funcs.h"
44
45 #include "gallivm/lp_bld_tgsi.h"
46 #include "util/u_format.h"
47
48 #include "util/u_memory.h"
49 #include "util/u_inlines.h"
50 #include "util/u_helpers.h"
51 #include "util/u_framebuffer.h"
52 #include "util/u_viewport.h"
53 #include "util/u_prim.h"
54
55 #include "swr_state.h"
56 #include "swr_context.h"
57 #include "gen_surf_state_llvm.h"
58 #include "gen_swr_context_llvm.h"
59 #include "swr_screen.h"
60 #include "swr_resource.h"
61 #include "swr_tex_sample.h"
62 #include "swr_scratch.h"
63 #include "swr_shader.h"
64 #include "swr_fence.h"
65
66 /* These should be pulled out into separate files as necessary.
67 * For now, everything is just initialized here to get going. */
68
69 static void *
70 swr_create_blend_state(struct pipe_context *pipe,
71 const struct pipe_blend_state *blend)
72 {
73 struct swr_blend_state *state = CALLOC_STRUCT(swr_blend_state);
74
75 memcpy(&state->pipe, blend, sizeof(*blend));
76
77 struct pipe_blend_state *pipe_blend = &state->pipe;
78
79 for (int target = 0;
80 target < std::min(SWR_NUM_RENDERTARGETS, PIPE_MAX_COLOR_BUFS);
81 target++) {
82
83 struct pipe_rt_blend_state *rt_blend = &pipe_blend->rt[target];
84 SWR_RENDER_TARGET_BLEND_STATE &blendState =
85 state->blendState.renderTarget[target];
86 RENDER_TARGET_BLEND_COMPILE_STATE &compileState =
87 state->compileState[target];
88
89 if (target != 0 && !pipe_blend->independent_blend_enable) {
90 memcpy(&compileState,
91 &state->compileState[0],
92 sizeof(RENDER_TARGET_BLEND_COMPILE_STATE));
93 continue;
94 }
95
96 compileState.blendEnable = rt_blend->blend_enable;
97 if (compileState.blendEnable) {
98 compileState.sourceAlphaBlendFactor =
99 swr_convert_blend_factor(rt_blend->alpha_src_factor);
100 compileState.destAlphaBlendFactor =
101 swr_convert_blend_factor(rt_blend->alpha_dst_factor);
102 compileState.sourceBlendFactor =
103 swr_convert_blend_factor(rt_blend->rgb_src_factor);
104 compileState.destBlendFactor =
105 swr_convert_blend_factor(rt_blend->rgb_dst_factor);
106
107 compileState.colorBlendFunc =
108 swr_convert_blend_func(rt_blend->rgb_func);
109 compileState.alphaBlendFunc =
110 swr_convert_blend_func(rt_blend->alpha_func);
111 }
112 compileState.logicOpEnable = state->pipe.logicop_enable;
113 if (compileState.logicOpEnable) {
114 compileState.logicOpFunc =
115 swr_convert_logic_op(state->pipe.logicop_func);
116 }
117
118 blendState.writeDisableRed =
119 (rt_blend->colormask & PIPE_MASK_R) ? 0 : 1;
120 blendState.writeDisableGreen =
121 (rt_blend->colormask & PIPE_MASK_G) ? 0 : 1;
122 blendState.writeDisableBlue =
123 (rt_blend->colormask & PIPE_MASK_B) ? 0 : 1;
124 blendState.writeDisableAlpha =
125 (rt_blend->colormask & PIPE_MASK_A) ? 0 : 1;
126
127 if (rt_blend->colormask == 0)
128 compileState.blendEnable = false;
129 }
130
131 return state;
132 }
133
134 static void
135 swr_bind_blend_state(struct pipe_context *pipe, void *blend)
136 {
137 struct swr_context *ctx = swr_context(pipe);
138
139 if (ctx->blend == blend)
140 return;
141
142 ctx->blend = (swr_blend_state *)blend;
143
144 ctx->dirty |= SWR_NEW_BLEND;
145 }
146
147 static void
148 swr_delete_blend_state(struct pipe_context *pipe, void *blend)
149 {
150 FREE(blend);
151 }
152
153 static void
154 swr_set_blend_color(struct pipe_context *pipe,
155 const struct pipe_blend_color *color)
156 {
157 struct swr_context *ctx = swr_context(pipe);
158
159 ctx->blend_color = *color;
160
161 ctx->dirty |= SWR_NEW_BLEND;
162 }
163
164 static void
165 swr_set_stencil_ref(struct pipe_context *pipe,
166 const struct pipe_stencil_ref *ref)
167 {
168 struct swr_context *ctx = swr_context(pipe);
169
170 ctx->stencil_ref = *ref;
171
172 ctx->dirty |= SWR_NEW_DEPTH_STENCIL_ALPHA;
173 }
174
175 static void *
176 swr_create_depth_stencil_state(
177 struct pipe_context *pipe,
178 const struct pipe_depth_stencil_alpha_state *depth_stencil)
179 {
180 struct pipe_depth_stencil_alpha_state *state;
181
182 state = (pipe_depth_stencil_alpha_state *)mem_dup(depth_stencil,
183 sizeof *depth_stencil);
184
185 return state;
186 }
187
188 static void
189 swr_bind_depth_stencil_state(struct pipe_context *pipe, void *depth_stencil)
190 {
191 struct swr_context *ctx = swr_context(pipe);
192
193 if (ctx->depth_stencil == (pipe_depth_stencil_alpha_state *)depth_stencil)
194 return;
195
196 ctx->depth_stencil = (pipe_depth_stencil_alpha_state *)depth_stencil;
197
198 ctx->dirty |= SWR_NEW_DEPTH_STENCIL_ALPHA;
199 }
200
201 static void
202 swr_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
203 {
204 FREE(depth);
205 }
206
207
208 static void *
209 swr_create_rasterizer_state(struct pipe_context *pipe,
210 const struct pipe_rasterizer_state *rast)
211 {
212 struct pipe_rasterizer_state *state;
213 state = (pipe_rasterizer_state *)mem_dup(rast, sizeof *rast);
214
215 return state;
216 }
217
218 static void
219 swr_bind_rasterizer_state(struct pipe_context *pipe, void *handle)
220 {
221 struct swr_context *ctx = swr_context(pipe);
222 const struct pipe_rasterizer_state *rasterizer =
223 (const struct pipe_rasterizer_state *)handle;
224
225 if (ctx->rasterizer == (pipe_rasterizer_state *)rasterizer)
226 return;
227
228 ctx->rasterizer = (pipe_rasterizer_state *)rasterizer;
229
230 ctx->dirty |= SWR_NEW_RASTERIZER;
231 }
232
233 static void
234 swr_delete_rasterizer_state(struct pipe_context *pipe, void *rasterizer)
235 {
236 FREE(rasterizer);
237 }
238
239
240 static void *
241 swr_create_sampler_state(struct pipe_context *pipe,
242 const struct pipe_sampler_state *sampler)
243 {
244 struct pipe_sampler_state *state =
245 (pipe_sampler_state *)mem_dup(sampler, sizeof *sampler);
246
247 return state;
248 }
249
250 static void
251 swr_bind_sampler_states(struct pipe_context *pipe,
252 enum pipe_shader_type shader,
253 unsigned start,
254 unsigned num,
255 void **samplers)
256 {
257 struct swr_context *ctx = swr_context(pipe);
258 unsigned i;
259
260 assert(shader < PIPE_SHADER_TYPES);
261 assert(start + num <= ARRAY_SIZE(ctx->samplers[shader]));
262
263 /* set the new samplers */
264 ctx->num_samplers[shader] = num;
265 for (i = 0; i < num; i++) {
266 ctx->samplers[shader][start + i] = (pipe_sampler_state *)samplers[i];
267 }
268
269 ctx->dirty |= SWR_NEW_SAMPLER;
270 }
271
272 static void
273 swr_delete_sampler_state(struct pipe_context *pipe, void *sampler)
274 {
275 FREE(sampler);
276 }
277
278
279 static struct pipe_sampler_view *
280 swr_create_sampler_view(struct pipe_context *pipe,
281 struct pipe_resource *texture,
282 const struct pipe_sampler_view *templ)
283 {
284 struct pipe_sampler_view *view = CALLOC_STRUCT(pipe_sampler_view);
285
286 if (view) {
287 *view = *templ;
288 view->reference.count = 1;
289 view->texture = NULL;
290 pipe_resource_reference(&view->texture, texture);
291 view->context = pipe;
292 }
293
294 return view;
295 }
296
297 static void
298 swr_set_sampler_views(struct pipe_context *pipe,
299 enum pipe_shader_type shader,
300 unsigned start,
301 unsigned num,
302 struct pipe_sampler_view **views)
303 {
304 struct swr_context *ctx = swr_context(pipe);
305 uint i;
306
307 assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
308
309 assert(shader < PIPE_SHADER_TYPES);
310 assert(start + num <= ARRAY_SIZE(ctx->sampler_views[shader]));
311
312 /* set the new sampler views */
313 ctx->num_sampler_views[shader] = num;
314 for (i = 0; i < num; i++) {
315 pipe_sampler_view_reference(&ctx->sampler_views[shader][start + i],
316 views[i]);
317 }
318
319 ctx->dirty |= SWR_NEW_SAMPLER_VIEW;
320 }
321
322 static void
323 swr_sampler_view_destroy(struct pipe_context *pipe,
324 struct pipe_sampler_view *view)
325 {
326 pipe_resource_reference(&view->texture, NULL);
327 FREE(view);
328 }
329
330 static void *
331 swr_create_vs_state(struct pipe_context *pipe,
332 const struct pipe_shader_state *vs)
333 {
334 struct swr_vertex_shader *swr_vs = new swr_vertex_shader;
335 if (!swr_vs)
336 return NULL;
337
338 swr_vs->pipe.tokens = tgsi_dup_tokens(vs->tokens);
339 swr_vs->pipe.stream_output = vs->stream_output;
340
341 lp_build_tgsi_info(vs->tokens, &swr_vs->info);
342
343 swr_vs->soState = {0};
344
345 if (swr_vs->pipe.stream_output.num_outputs) {
346 pipe_stream_output_info *stream_output = &swr_vs->pipe.stream_output;
347
348 swr_vs->soState.soEnable = true;
349 // soState.rasterizerDisable is set during dirty-state validation
350 // soState.streamToRasterizer not used
351
352 for (uint32_t i = 0; i < stream_output->num_outputs; i++) {
353 unsigned attrib_slot = stream_output->output[i].register_index;
354 attrib_slot = swr_so_adjust_attrib(attrib_slot, swr_vs);
355 swr_vs->soState.streamMasks[stream_output->output[i].stream] |=
356 (1 << attrib_slot);
357 }
358 for (uint32_t i = 0; i < MAX_SO_STREAMS; i++) {
359 swr_vs->soState.streamNumEntries[i] =
360 _mm_popcnt_u32(swr_vs->soState.streamMasks[i]);
361 }
362 }
363
364 return swr_vs;
365 }
366
367 static void
368 swr_bind_vs_state(struct pipe_context *pipe, void *vs)
369 {
370 struct swr_context *ctx = swr_context(pipe);
371
372 if (ctx->vs == vs)
373 return;
374
375 ctx->vs = (swr_vertex_shader *)vs;
376 ctx->dirty |= SWR_NEW_VS;
377 }
378
379 static void
380 swr_delete_vs_state(struct pipe_context *pipe, void *vs)
381 {
382 struct swr_vertex_shader *swr_vs = (swr_vertex_shader *)vs;
383 FREE((void *)swr_vs->pipe.tokens);
384 struct swr_screen *screen = swr_screen(pipe->screen);
385
386 /* Defer deletion of vs state */
387 swr_fence_work_delete_vs(screen->flush_fence, swr_vs);
388 }
389
390 static void *
391 swr_create_fs_state(struct pipe_context *pipe,
392 const struct pipe_shader_state *fs)
393 {
394 struct swr_fragment_shader *swr_fs = new swr_fragment_shader;
395 if (!swr_fs)
396 return NULL;
397
398 swr_fs->pipe.tokens = tgsi_dup_tokens(fs->tokens);
399
400 lp_build_tgsi_info(fs->tokens, &swr_fs->info);
401
402 return swr_fs;
403 }
404
405
406 static void
407 swr_bind_fs_state(struct pipe_context *pipe, void *fs)
408 {
409 struct swr_context *ctx = swr_context(pipe);
410
411 if (ctx->fs == fs)
412 return;
413
414 ctx->fs = (swr_fragment_shader *)fs;
415 ctx->dirty |= SWR_NEW_FS;
416 }
417
418 static void
419 swr_delete_fs_state(struct pipe_context *pipe, void *fs)
420 {
421 struct swr_fragment_shader *swr_fs = (swr_fragment_shader *)fs;
422 FREE((void *)swr_fs->pipe.tokens);
423 struct swr_screen *screen = swr_screen(pipe->screen);
424
425 /* Defer deletion of fs state */
426 swr_fence_work_delete_fs(screen->flush_fence, swr_fs);
427 }
428
429 static void *
430 swr_create_gs_state(struct pipe_context *pipe,
431 const struct pipe_shader_state *gs)
432 {
433 struct swr_geometry_shader *swr_gs = new swr_geometry_shader;
434 if (!swr_gs)
435 return NULL;
436
437 swr_gs->pipe.tokens = tgsi_dup_tokens(gs->tokens);
438 lp_build_tgsi_info(gs->tokens, &swr_gs->info);
439 return swr_gs;
440 }
441
442
443 static void
444 swr_bind_gs_state(struct pipe_context *pipe, void *gs)
445 {
446 struct swr_context *ctx = swr_context(pipe);
447
448 if (ctx->gs == gs)
449 return;
450
451 ctx->gs = (swr_geometry_shader *)gs;
452 ctx->dirty |= SWR_NEW_GS;
453 }
454
455 static void
456 swr_delete_gs_state(struct pipe_context *pipe, void *gs)
457 {
458 struct swr_geometry_shader *swr_gs = (swr_geometry_shader *)gs;
459 FREE((void *)swr_gs->pipe.tokens);
460 struct swr_screen *screen = swr_screen(pipe->screen);
461
462 /* Defer deletion of gs state */
463 swr_fence_work_delete_gs(screen->flush_fence, swr_gs);
464 }
465
466 static void
467 swr_set_constant_buffer(struct pipe_context *pipe,
468 enum pipe_shader_type shader,
469 uint index,
470 const struct pipe_constant_buffer *cb)
471 {
472 struct swr_context *ctx = swr_context(pipe);
473 struct pipe_resource *constants = cb ? cb->buffer : NULL;
474
475 assert(shader < PIPE_SHADER_TYPES);
476 assert(index < ARRAY_SIZE(ctx->constants[shader]));
477
478 /* note: reference counting */
479 util_copy_constant_buffer(&ctx->constants[shader][index], cb);
480
481 if (shader == PIPE_SHADER_VERTEX) {
482 ctx->dirty |= SWR_NEW_VSCONSTANTS;
483 } else if (shader == PIPE_SHADER_FRAGMENT) {
484 ctx->dirty |= SWR_NEW_FSCONSTANTS;
485 } else if (shader == PIPE_SHADER_GEOMETRY) {
486 ctx->dirty |= SWR_NEW_GSCONSTANTS;
487 }
488
489 if (cb && cb->user_buffer) {
490 pipe_resource_reference(&constants, NULL);
491 }
492 }
493
494
495 static void *
496 swr_create_vertex_elements_state(struct pipe_context *pipe,
497 unsigned num_elements,
498 const struct pipe_vertex_element *attribs)
499 {
500 struct swr_vertex_element_state *velems;
501 assert(num_elements <= PIPE_MAX_ATTRIBS);
502 velems = new swr_vertex_element_state;
503 if (velems) {
504 memset(&velems->fsState, 0, sizeof(velems->fsState));
505 velems->fsState.bVertexIDOffsetEnable = true;
506 velems->fsState.numAttribs = num_elements;
507 for (unsigned i = 0; i < num_elements; i++) {
508 // XXX: we should do this keyed on the VS usage info
509
510 const struct util_format_description *desc =
511 util_format_description(attribs[i].src_format);
512
513 velems->fsState.layout[i].AlignedByteOffset = attribs[i].src_offset;
514 velems->fsState.layout[i].Format =
515 mesa_to_swr_format(attribs[i].src_format);
516 velems->fsState.layout[i].StreamIndex =
517 attribs[i].vertex_buffer_index;
518 velems->fsState.layout[i].InstanceEnable =
519 attribs[i].instance_divisor != 0;
520 velems->fsState.layout[i].ComponentControl0 =
521 desc->channel[0].type != UTIL_FORMAT_TYPE_VOID
522 ? ComponentControl::StoreSrc
523 : ComponentControl::Store0;
524 velems->fsState.layout[i].ComponentControl1 =
525 desc->channel[1].type != UTIL_FORMAT_TYPE_VOID
526 ? ComponentControl::StoreSrc
527 : ComponentControl::Store0;
528 velems->fsState.layout[i].ComponentControl2 =
529 desc->channel[2].type != UTIL_FORMAT_TYPE_VOID
530 ? ComponentControl::StoreSrc
531 : ComponentControl::Store0;
532 velems->fsState.layout[i].ComponentControl3 =
533 desc->channel[3].type != UTIL_FORMAT_TYPE_VOID
534 ? ComponentControl::StoreSrc
535 : ComponentControl::Store1Fp;
536 velems->fsState.layout[i].ComponentPacking = ComponentEnable::XYZW;
537 velems->fsState.layout[i].InstanceAdvancementState =
538 attribs[i].instance_divisor;
539
540 /* Calculate the pitch of each stream */
541 const SWR_FORMAT_INFO &swr_desc = GetFormatInfo(
542 mesa_to_swr_format(attribs[i].src_format));
543 velems->stream_pitch[attribs[i].vertex_buffer_index] += swr_desc.Bpp;
544
545 if (attribs[i].instance_divisor != 0) {
546 velems->instanced_bufs |= 1U << attribs[i].vertex_buffer_index;
547 uint32_t *min_instance_div =
548 &velems->min_instance_div[attribs[i].vertex_buffer_index];
549 if (!*min_instance_div ||
550 attribs[i].instance_divisor < *min_instance_div)
551 *min_instance_div = attribs[i].instance_divisor;
552 }
553 }
554 }
555
556 return velems;
557 }
558
559 static void
560 swr_bind_vertex_elements_state(struct pipe_context *pipe, void *velems)
561 {
562 struct swr_context *ctx = swr_context(pipe);
563 struct swr_vertex_element_state *swr_velems =
564 (struct swr_vertex_element_state *)velems;
565
566 ctx->velems = swr_velems;
567 ctx->dirty |= SWR_NEW_VERTEX;
568 }
569
570 static void
571 swr_delete_vertex_elements_state(struct pipe_context *pipe, void *velems)
572 {
573 struct swr_vertex_element_state *swr_velems =
574 (struct swr_vertex_element_state *) velems;
575 /* XXX Need to destroy fetch shader? */
576 delete swr_velems;
577 }
578
579
580 static void
581 swr_set_vertex_buffers(struct pipe_context *pipe,
582 unsigned start_slot,
583 unsigned num_elements,
584 const struct pipe_vertex_buffer *buffers)
585 {
586 struct swr_context *ctx = swr_context(pipe);
587
588 assert(num_elements <= PIPE_MAX_ATTRIBS);
589
590 util_set_vertex_buffers_count(ctx->vertex_buffer,
591 &ctx->num_vertex_buffers,
592 buffers,
593 start_slot,
594 num_elements);
595
596 ctx->dirty |= SWR_NEW_VERTEX;
597 }
598
599
600 static void
601 swr_set_polygon_stipple(struct pipe_context *pipe,
602 const struct pipe_poly_stipple *stipple)
603 {
604 struct swr_context *ctx = swr_context(pipe);
605
606 ctx->poly_stipple.pipe = *stipple; /* struct copy */
607 ctx->dirty |= SWR_NEW_STIPPLE;
608 }
609
610 static void
611 swr_set_clip_state(struct pipe_context *pipe,
612 const struct pipe_clip_state *clip)
613 {
614 struct swr_context *ctx = swr_context(pipe);
615
616 ctx->clip = *clip;
617 /* XXX Unimplemented, but prevents crash */
618
619 ctx->dirty |= SWR_NEW_CLIP;
620 }
621
622
623 static void
624 swr_set_scissor_states(struct pipe_context *pipe,
625 unsigned start_slot,
626 unsigned num_scissors,
627 const struct pipe_scissor_state *scissors)
628 {
629 struct swr_context *ctx = swr_context(pipe);
630
631 memcpy(ctx->scissors + start_slot, scissors,
632 sizeof(struct pipe_scissor_state) * num_scissors);
633
634 for (unsigned i = 0; i < num_scissors; i++) {
635 auto idx = start_slot + i;
636 ctx->swr_scissors[idx].xmin = scissors[idx].minx;
637 ctx->swr_scissors[idx].xmax = scissors[idx].maxx;
638 ctx->swr_scissors[idx].ymin = scissors[idx].miny;
639 ctx->swr_scissors[idx].ymax = scissors[idx].maxy;
640 }
641 ctx->dirty |= SWR_NEW_SCISSOR;
642 }
643
644 static void
645 swr_set_viewport_states(struct pipe_context *pipe,
646 unsigned start_slot,
647 unsigned num_viewports,
648 const struct pipe_viewport_state *vpt)
649 {
650 struct swr_context *ctx = swr_context(pipe);
651
652 memcpy(ctx->viewports + start_slot, vpt, sizeof(struct pipe_viewport_state) * num_viewports);
653 ctx->dirty |= SWR_NEW_VIEWPORT;
654 }
655
656
657 static void
658 swr_set_framebuffer_state(struct pipe_context *pipe,
659 const struct pipe_framebuffer_state *fb)
660 {
661 struct swr_context *ctx = swr_context(pipe);
662
663 bool changed = !util_framebuffer_state_equal(&ctx->framebuffer, fb);
664
665 assert(fb->width <= KNOB_GUARDBAND_WIDTH);
666 assert(fb->height <= KNOB_GUARDBAND_HEIGHT);
667
668 if (changed) {
669 util_copy_framebuffer_state(&ctx->framebuffer, fb);
670
671 /* 0 and 1 both indicate no msaa. Core doesn't understand 0 samples */
672 ctx->framebuffer.samples = std::max((ubyte)1, ctx->framebuffer.samples);
673
674 ctx->dirty |= SWR_NEW_FRAMEBUFFER;
675 }
676 }
677
678
679 static void
680 swr_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
681 {
682 struct swr_context *ctx = swr_context(pipe);
683
684 if (sample_mask != ctx->sample_mask) {
685 ctx->sample_mask = sample_mask;
686 ctx->dirty |= SWR_NEW_RASTERIZER;
687 }
688 }
689
690 /*
691 * MSAA fixed sample position table
692 * used by update_derived and get_sample_position
693 * (integer locations on a 16x16 grid)
694 */
695 static const uint8_t swr_sample_positions[][2] =
696 { /* 1x*/ { 8, 8},
697 /* 2x*/ {12,12},{ 4, 4},
698 /* 4x*/ { 6, 2},{14, 6},{ 2,10},{10,14},
699 /* 8x*/ { 9, 5},{ 7,11},{13, 9},{ 5, 3},
700 { 3,13},{ 1, 7},{11,15},{15, 1},
701 /*16x*/ { 9, 9},{ 7, 5},{ 5,10},{12, 7},
702 { 3, 6},{10,13},{13,11},{11, 3},
703 { 6,14},{ 8, 1},{ 4, 2},{ 2,12},
704 { 0, 8},{15, 4},{14,15},{ 1, 0} };
705
706 static void
707 swr_get_sample_position(struct pipe_context *pipe,
708 unsigned sample_count, unsigned sample_index,
709 float *out_value)
710 {
711 /* validate sample_count */
712 sample_count = GetNumSamples(GetSampleCount(sample_count));
713
714 const uint8_t *sample = swr_sample_positions[sample_count-1 + sample_index];
715 out_value[0] = sample[0] / 16.0f;
716 out_value[1] = sample[1] / 16.0f;
717 }
718
719
720 /*
721 * Update resource in-use status
722 * All resources bound to color or depth targets are marked as WRITE resources.
723 * Vertex/index buffers (VBOs) and texture views are marked as READ resources.
724 */
725 void
726 swr_update_resource_status(struct pipe_context *pipe,
727 const struct pipe_draw_info *p_draw_info)
728 {
729 struct swr_context *ctx = swr_context(pipe);
730 struct pipe_framebuffer_state *fb = &ctx->framebuffer;
731
732 /* colorbuffer targets */
733 if (fb->nr_cbufs)
734 for (uint32_t i = 0; i < fb->nr_cbufs; ++i)
735 if (fb->cbufs[i])
736 swr_resource_write(fb->cbufs[i]->texture);
737
738 /* depth/stencil target */
739 if (fb->zsbuf)
740 swr_resource_write(fb->zsbuf->texture);
741
742 /* VBO vertex buffers */
743 for (uint32_t i = 0; i < ctx->num_vertex_buffers; i++) {
744 struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
745 if (!vb->is_user_buffer && vb->buffer.resource)
746 swr_resource_read(vb->buffer.resource);
747 }
748
749 /* VBO index buffer */
750 if (p_draw_info && p_draw_info->index_size) {
751 if (!p_draw_info->has_user_indices)
752 swr_resource_read(p_draw_info->index.resource);
753 }
754
755 /* transform feedback buffers */
756 for (uint32_t i = 0; i < ctx->num_so_targets; i++) {
757 struct pipe_stream_output_target *target = ctx->so_targets[i];
758 if (target && target->buffer)
759 swr_resource_write(target->buffer);
760 }
761
762 /* texture sampler views */
763 for (uint32_t j : {PIPE_SHADER_VERTEX, PIPE_SHADER_FRAGMENT}) {
764 for (uint32_t i = 0; i < ctx->num_sampler_views[j]; i++) {
765 struct pipe_sampler_view *view = ctx->sampler_views[j][i];
766 if (view)
767 swr_resource_read(view->texture);
768 }
769 }
770
771 /* constant buffers */
772 for (uint32_t j : {PIPE_SHADER_VERTEX, PIPE_SHADER_FRAGMENT}) {
773 for (uint32_t i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
774 struct pipe_constant_buffer *cb = &ctx->constants[j][i];
775 if (cb->buffer)
776 swr_resource_read(cb->buffer);
777 }
778 }
779 }
780
781 static void
782 swr_update_texture_state(struct swr_context *ctx,
783 enum pipe_shader_type shader_type,
784 unsigned num_sampler_views,
785 swr_jit_texture *textures)
786 {
787 for (unsigned i = 0; i < num_sampler_views; i++) {
788 struct pipe_sampler_view *view =
789 ctx->sampler_views[shader_type][i];
790 struct swr_jit_texture *jit_tex = &textures[i];
791
792 memset(jit_tex, 0, sizeof(*jit_tex));
793 if (view) {
794 struct pipe_resource *res = view->texture;
795 struct swr_resource *swr_res = swr_resource(res);
796 SWR_SURFACE_STATE *swr = &swr_res->swr;
797 size_t *mip_offsets = swr_res->mip_offsets;
798 if (swr_res->has_depth && swr_res->has_stencil &&
799 !util_format_has_depth(util_format_description(view->format))) {
800 swr = &swr_res->secondary;
801 mip_offsets = swr_res->secondary_mip_offsets;
802 }
803
804 jit_tex->width = res->width0;
805 jit_tex->height = res->height0;
806 jit_tex->base_ptr = (uint8_t*)swr->xpBaseAddress;
807 if (view->target != PIPE_BUFFER) {
808 jit_tex->first_level = view->u.tex.first_level;
809 jit_tex->last_level = view->u.tex.last_level;
810 if (view->target == PIPE_TEXTURE_3D)
811 jit_tex->depth = res->depth0;
812 else
813 jit_tex->depth =
814 view->u.tex.last_layer - view->u.tex.first_layer + 1;
815 jit_tex->base_ptr += view->u.tex.first_layer *
816 swr->qpitch * swr->pitch;
817 } else {
818 unsigned view_blocksize = util_format_get_blocksize(view->format);
819 jit_tex->base_ptr += view->u.buf.offset;
820 jit_tex->width = view->u.buf.size / view_blocksize;
821 jit_tex->depth = 1;
822 }
823
824 for (unsigned level = jit_tex->first_level;
825 level <= jit_tex->last_level;
826 level++) {
827 jit_tex->row_stride[level] = swr->pitch;
828 jit_tex->img_stride[level] = swr->qpitch * swr->pitch;
829 jit_tex->mip_offsets[level] = mip_offsets[level];
830 }
831 }
832 }
833 }
834
835 static void
836 swr_update_sampler_state(struct swr_context *ctx,
837 enum pipe_shader_type shader_type,
838 unsigned num_samplers,
839 swr_jit_sampler *samplers)
840 {
841 for (unsigned i = 0; i < num_samplers; i++) {
842 const struct pipe_sampler_state *sampler =
843 ctx->samplers[shader_type][i];
844
845 if (sampler) {
846 samplers[i].min_lod = sampler->min_lod;
847 samplers[i].max_lod = sampler->max_lod;
848 samplers[i].lod_bias = sampler->lod_bias;
849 COPY_4V(samplers[i].border_color, sampler->border_color.f);
850 }
851 }
852 }
853
854 static void
855 swr_update_constants(struct swr_context *ctx, enum pipe_shader_type shaderType)
856 {
857 swr_draw_context *pDC = &ctx->swrDC;
858
859 const float **constant;
860 uint32_t *num_constants;
861 struct swr_scratch_space *scratch;
862
863 switch (shaderType) {
864 case PIPE_SHADER_VERTEX:
865 constant = pDC->constantVS;
866 num_constants = pDC->num_constantsVS;
867 scratch = &ctx->scratch->vs_constants;
868 break;
869 case PIPE_SHADER_FRAGMENT:
870 constant = pDC->constantFS;
871 num_constants = pDC->num_constantsFS;
872 scratch = &ctx->scratch->fs_constants;
873 break;
874 case PIPE_SHADER_GEOMETRY:
875 constant = pDC->constantGS;
876 num_constants = pDC->num_constantsGS;
877 scratch = &ctx->scratch->gs_constants;
878 break;
879 default:
880 debug_printf("Unsupported shader type constants\n");
881 return;
882 }
883
884 for (UINT i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
885 const pipe_constant_buffer *cb = &ctx->constants[shaderType][i];
886 num_constants[i] = cb->buffer_size;
887 if (cb->buffer) {
888 constant[i] =
889 (const float *)(swr_resource_data(cb->buffer) +
890 cb->buffer_offset);
891 } else {
892 /* Need to copy these constants to scratch space */
893 if (cb->user_buffer && cb->buffer_size) {
894 const void *ptr =
895 ((const uint8_t *)cb->user_buffer + cb->buffer_offset);
896 uint32_t size = AlignUp(cb->buffer_size, 4);
897 ptr = swr_copy_to_scratch_space(ctx, scratch, ptr, size);
898 constant[i] = (const float *)ptr;
899 }
900 }
901 }
902 }
903
904 static bool
905 swr_change_rt(struct swr_context *ctx,
906 unsigned attachment,
907 const struct pipe_surface *sf)
908 {
909 swr_draw_context *pDC = &ctx->swrDC;
910 struct SWR_SURFACE_STATE *rt = &pDC->renderTargets[attachment];
911
912 /* Do nothing if the render target hasn't changed */
913 if ((!sf || !sf->texture) && (void*)(rt->xpBaseAddress) == nullptr)
914 return false;
915
916 /* Deal with disabling RT up front */
917 if (!sf || !sf->texture) {
918 /* If detaching attachment, mark tiles as RESOLVED so core
919 * won't try to load from non-existent target. */
920 swr_store_render_target(&ctx->pipe, attachment, SWR_TILE_RESOLVED);
921 *rt = {0};
922 return true;
923 }
924
925 const struct swr_resource *swr = swr_resource(sf->texture);
926 const SWR_SURFACE_STATE *swr_surface = &swr->swr;
927 SWR_FORMAT fmt = mesa_to_swr_format(sf->format);
928
929 if (attachment == SWR_ATTACHMENT_STENCIL && swr->secondary.xpBaseAddress) {
930 swr_surface = &swr->secondary;
931 fmt = swr_surface->format;
932 }
933
934 if (rt->xpBaseAddress == swr_surface->xpBaseAddress &&
935 rt->format == fmt &&
936 rt->lod == sf->u.tex.level &&
937 rt->arrayIndex == sf->u.tex.first_layer)
938 return false;
939
940 bool need_fence = false;
941
942 /* StoreTile for changed target */
943 if (rt->xpBaseAddress) {
944 /* If changing attachment to a new target, mark tiles as
945 * INVALID so they are reloaded from surface. */
946 swr_store_render_target(&ctx->pipe, attachment, SWR_TILE_INVALID);
947 need_fence = true;
948 } else {
949 /* if no previous attachment, invalidate tiles that may be marked
950 * RESOLVED because of an old attachment */
951 swr_invalidate_render_target(&ctx->pipe, attachment, sf->width, sf->height);
952 /* no need to set fence here */
953 }
954
955 /* Make new attachment */
956 *rt = *swr_surface;
957 rt->format = fmt;
958 rt->lod = sf->u.tex.level;
959 rt->arrayIndex = sf->u.tex.first_layer;
960
961 return need_fence;
962 }
963
964 /*
965 * For cases where resources are shared between contexts, invalidate
966 * this ctx's resource so it can be fetched fresh. The old ctx's resource
967 * has already been stored during a flush.
968 */
969 static inline void
970 swr_invalidate_buffers_after_ctx_change(struct pipe_context *pipe)
971 {
972 struct swr_context *ctx = swr_context(pipe);
973
974 for (uint32_t i = 0; i < ctx->framebuffer.nr_cbufs; i++) {
975 struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
976 if (cb) {
977 struct swr_resource *res = swr_resource(cb->texture);
978 if (res->curr_pipe != pipe) {
979 /* if curr_pipe is NULL (first use), status should not be WRITE */
980 assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
981 if (res->status & SWR_RESOURCE_WRITE) {
982 swr_invalidate_render_target(pipe, i, cb->width, cb->height);
983 }
984 }
985 res->curr_pipe = pipe;
986 }
987 }
988 if (ctx->framebuffer.zsbuf) {
989 struct pipe_surface *zb = ctx->framebuffer.zsbuf;
990 if (zb) {
991 struct swr_resource *res = swr_resource(zb->texture);
992 if (res->curr_pipe != pipe) {
993 /* if curr_pipe is NULL (first use), status should not be WRITE */
994 assert(res->curr_pipe || !(res->status & SWR_RESOURCE_WRITE));
995 if (res->status & SWR_RESOURCE_WRITE) {
996 swr_invalidate_render_target(pipe, SWR_ATTACHMENT_DEPTH, zb->width, zb->height);
997 swr_invalidate_render_target(pipe, SWR_ATTACHMENT_STENCIL, zb->width, zb->height);
998 }
999 }
1000 res->curr_pipe = pipe;
1001 }
1002 }
1003 }
1004
1005 static inline void
1006 swr_user_vbuf_range(const struct pipe_draw_info *info,
1007 const struct swr_vertex_element_state *velems,
1008 const struct pipe_vertex_buffer *vb,
1009 uint32_t i,
1010 uint32_t *totelems,
1011 uint32_t *base,
1012 uint32_t *size)
1013 {
1014 /* FIXME: The size is too large - we don't access the full extra stride. */
1015 unsigned elems;
1016 if (velems->instanced_bufs & (1U << i)) {
1017 elems = info->instance_count / velems->min_instance_div[i] + 1;
1018 *totelems = info->start_instance + elems;
1019 *base = info->start_instance * vb->stride;
1020 *size = elems * vb->stride;
1021 } else if (vb->stride) {
1022 elems = info->max_index - info->min_index + 1;
1023 *totelems = (info->max_index + info->index_bias) + 1;
1024 *base = (info->min_index + info->index_bias) * vb->stride;
1025 *size = elems * vb->stride;
1026 } else {
1027 *totelems = 1;
1028 *base = 0;
1029 *size = velems->stream_pitch[i];
1030 }
1031 }
1032
1033 static void
1034 swr_update_poly_stipple(struct swr_context *ctx)
1035 {
1036 struct swr_draw_context *pDC = &ctx->swrDC;
1037
1038 assert(sizeof(ctx->poly_stipple.pipe.stipple) == sizeof(pDC->polyStipple));
1039 memcpy(pDC->polyStipple,
1040 ctx->poly_stipple.pipe.stipple,
1041 sizeof(ctx->poly_stipple.pipe.stipple));
1042 }
1043
1044 void
1045 swr_update_derived(struct pipe_context *pipe,
1046 const struct pipe_draw_info *p_draw_info)
1047 {
1048 struct swr_context *ctx = swr_context(pipe);
1049 struct swr_screen *screen = swr_screen(pipe->screen);
1050
1051 /* When called from swr_clear (p_draw_info = null), set any null
1052 * state-objects to the dummy state objects to prevent nullptr dereference
1053 * in validation below.
1054 *
1055 * It is important that this remains static for zero initialization. These
1056 * aren't meant to be proper state objects, just empty structs. They will
1057 * not be written to.
1058 *
1059 * Shaders can't be part of the union since they contain std::unordered_map.
1060 */
1061 static struct {
1062 union {
1063 struct pipe_rasterizer_state rasterizer;
1064 struct pipe_depth_stencil_alpha_state depth_stencil;
1065 struct swr_blend_state blend;
1066 } state;
1067 struct swr_vertex_shader vs;
1068 struct swr_fragment_shader fs;
1069 } swr_dummy;
1070
1071 if (!p_draw_info) {
1072 if (!ctx->rasterizer)
1073 ctx->rasterizer = &swr_dummy.state.rasterizer;
1074 if (!ctx->depth_stencil)
1075 ctx->depth_stencil = &swr_dummy.state.depth_stencil;
1076 if (!ctx->blend)
1077 ctx->blend = &swr_dummy.state.blend;
1078 if (!ctx->vs)
1079 ctx->vs = &swr_dummy.vs;
1080 if (!ctx->fs)
1081 ctx->fs = &swr_dummy.fs;
1082 }
1083
1084 /* Update screen->pipe to current pipe context. */
1085 screen->pipe = pipe;
1086
1087 /* Any state that requires dirty flags to be re-triggered sets this mask,
1088 * e.g. user_buffer vertex and index buffers. */
1089 unsigned post_update_dirty_flags = 0;
1090
1091 /* bring resources that changed context up-to-date */
1092 swr_invalidate_buffers_after_ctx_change(pipe);
1093
1094 /* Render Targets */
1095 if (ctx->dirty & SWR_NEW_FRAMEBUFFER) {
1096 struct pipe_framebuffer_state *fb = &ctx->framebuffer;
1097 const struct util_format_description *desc = NULL;
1098 bool need_fence = false;
1099
1100 /* colorbuffer targets */
1101 if (fb->nr_cbufs) {
1102 for (unsigned i = 0; i < fb->nr_cbufs; ++i)
1103 need_fence |= swr_change_rt(
1104 ctx, SWR_ATTACHMENT_COLOR0 + i, fb->cbufs[i]);
1105 }
1106 for (unsigned i = fb->nr_cbufs; i < SWR_NUM_RENDERTARGETS; ++i)
1107 need_fence |= swr_change_rt(ctx, SWR_ATTACHMENT_COLOR0 + i, NULL);
1108
1109 /* depth/stencil target */
1110 if (fb->zsbuf)
1111 desc = util_format_description(fb->zsbuf->format);
1112 if (fb->zsbuf && util_format_has_depth(desc))
1113 need_fence |= swr_change_rt(ctx, SWR_ATTACHMENT_DEPTH, fb->zsbuf);
1114 else
1115 need_fence |= swr_change_rt(ctx, SWR_ATTACHMENT_DEPTH, NULL);
1116
1117 if (fb->zsbuf && util_format_has_stencil(desc))
1118 need_fence |= swr_change_rt(ctx, SWR_ATTACHMENT_STENCIL, fb->zsbuf);
1119 else
1120 need_fence |= swr_change_rt(ctx, SWR_ATTACHMENT_STENCIL, NULL);
1121
1122 /* This fence ensures any attachment changes are resolved before the
1123 * next draw */
1124 if (need_fence)
1125 swr_fence_submit(ctx, screen->flush_fence);
1126 }
1127
1128 /* Raster state */
1129 if (ctx->dirty & (SWR_NEW_RASTERIZER |
1130 SWR_NEW_VS | // clipping
1131 SWR_NEW_FRAMEBUFFER)) {
1132 pipe_rasterizer_state *rasterizer = ctx->rasterizer;
1133 pipe_framebuffer_state *fb = &ctx->framebuffer;
1134
1135 SWR_RASTSTATE *rastState = &ctx->derived.rastState;
1136 rastState->cullMode = swr_convert_cull_mode(rasterizer->cull_face);
1137 rastState->frontWinding = rasterizer->front_ccw
1138 ? SWR_FRONTWINDING_CCW
1139 : SWR_FRONTWINDING_CW;
1140 rastState->scissorEnable = rasterizer->scissor;
1141 rastState->pointSize = rasterizer->point_size > 0.0f
1142 ? rasterizer->point_size
1143 : 1.0f;
1144 rastState->lineWidth = rasterizer->line_width > 0.0f
1145 ? rasterizer->line_width
1146 : 1.0f;
1147
1148 rastState->pointParam = rasterizer->point_size_per_vertex;
1149
1150 rastState->pointSpriteEnable = rasterizer->sprite_coord_enable;
1151 rastState->pointSpriteTopOrigin =
1152 rasterizer->sprite_coord_mode == PIPE_SPRITE_COORD_UPPER_LEFT;
1153
1154 /* If SWR_MSAA_FORCE_ENABLE is set, turn msaa on */
1155 if (screen->msaa_force_enable && !rasterizer->multisample) {
1156 /* Force enable and use the value the surface was created with */
1157 rasterizer->multisample = true;
1158 fb->samples = swr_resource(fb->cbufs[0]->texture)->swr.numSamples;
1159 fprintf(stderr,"msaa force enable: %d samples\n", fb->samples);
1160 }
1161
1162 rastState->sampleCount = GetSampleCount(fb->samples);
1163 rastState->forcedSampleCount = false;
1164 rastState->bIsCenterPattern = !rasterizer->multisample;
1165 rastState->pixelLocation = SWR_PIXEL_LOCATION_CENTER;
1166
1167 /* Only initialize sample positions if msaa is enabled */
1168 if (rasterizer->multisample) {
1169 for (uint32_t i = 0; i < fb->samples; i++) {
1170 const uint8_t *sample = swr_sample_positions[fb->samples-1 + i];
1171 rastState->samplePositions.SetXi(i, sample[0] << 4);
1172 rastState->samplePositions.SetYi(i, sample[1] << 4);
1173 rastState->samplePositions.SetX (i, sample[0] / 16.0f);
1174 rastState->samplePositions.SetY (i, sample[1] / 16.0f);
1175 }
1176 rastState->samplePositions.PrecalcSampleData(fb->samples);
1177 }
1178
1179 bool do_offset = false;
1180 switch (rasterizer->fill_front) {
1181 case PIPE_POLYGON_MODE_FILL:
1182 do_offset = rasterizer->offset_tri;
1183 break;
1184 case PIPE_POLYGON_MODE_LINE:
1185 do_offset = rasterizer->offset_line;
1186 break;
1187 case PIPE_POLYGON_MODE_POINT:
1188 do_offset = rasterizer->offset_point;
1189 break;
1190 }
1191
1192 if (do_offset) {
1193 rastState->depthBias = rasterizer->offset_units;
1194 rastState->slopeScaledDepthBias = rasterizer->offset_scale;
1195 rastState->depthBiasClamp = rasterizer->offset_clamp;
1196 } else {
1197 rastState->depthBias = 0;
1198 rastState->slopeScaledDepthBias = 0;
1199 rastState->depthBiasClamp = 0;
1200 }
1201
1202 /* translate polygon mode, at least for the front==back case */
1203 rastState->fillMode = swr_convert_fill_mode(rasterizer->fill_front);
1204
1205 struct pipe_surface *zb = fb->zsbuf;
1206 if (zb && swr_resource(zb->texture)->has_depth)
1207 rastState->depthFormat = swr_resource(zb->texture)->swr.format;
1208
1209 rastState->depthClipEnable = rasterizer->depth_clip_near;
1210 rastState->clipEnable = rasterizer->depth_clip_near | rasterizer->depth_clip_far;
1211 rastState->clipHalfZ = rasterizer->clip_halfz;
1212
1213 ctx->api.pfnSwrSetRastState(ctx->swrContext, rastState);
1214 }
1215
1216 /* Viewport */
1217 if (ctx->dirty & (SWR_NEW_VIEWPORT | SWR_NEW_FRAMEBUFFER
1218 | SWR_NEW_RASTERIZER)) {
1219 pipe_viewport_state *state = &ctx->viewports[0];
1220 pipe_framebuffer_state *fb = &ctx->framebuffer;
1221 pipe_rasterizer_state *rasterizer = ctx->rasterizer;
1222
1223 SWR_VIEWPORT *vp = &ctx->derived.vp[0];
1224 SWR_VIEWPORT_MATRICES *vpm = &ctx->derived.vpm;
1225
1226 for (unsigned i = 0; i < KNOB_NUM_VIEWPORTS_SCISSORS; i++) {
1227 vp->x = state->translate[0] - state->scale[0];
1228 vp->width = 2 * state->scale[0];
1229 vp->y = state->translate[1] - fabs(state->scale[1]);
1230 vp->height = 2 * fabs(state->scale[1]);
1231 util_viewport_zmin_zmax(state, rasterizer->clip_halfz,
1232 &vp->minZ, &vp->maxZ);
1233
1234 vpm->m00[i] = state->scale[0];
1235 vpm->m11[i] = state->scale[1];
1236 vpm->m22[i] = state->scale[2];
1237 vpm->m30[i] = state->translate[0];
1238 vpm->m31[i] = state->translate[1];
1239 vpm->m32[i] = state->translate[2];
1240
1241 /* Now that the matrix is calculated, clip the view coords to screen
1242 * size. OpenGL allows for negative x,y in the viewport. */
1243 if (vp->x < 0.0f) {
1244 vp->width += vp->x;
1245 vp->x = 0.0f;
1246 }
1247 if (vp->y < 0.0f) {
1248 vp->height += vp->y;
1249 vp->y = 0.0f;
1250 }
1251 vp->width = std::min(vp->width, (float) fb->width - vp->x);
1252 vp->height = std::min(vp->height, (float) fb->height - vp->y);
1253
1254 vp++;
1255 state++;
1256 }
1257 ctx->api.pfnSwrSetViewports(ctx->swrContext, KNOB_NUM_VIEWPORTS_SCISSORS,
1258 &ctx->derived.vp[0], &ctx->derived.vpm);
1259 }
1260
1261 /* When called from swr_clear (p_draw_info = null), render targets,
1262 * rasterState and viewports (dependent on render targets) are the only
1263 * necessary validation. Defer remaining validation by setting
1264 * post_update_dirty_flags and clearing all dirty flags. BackendState is
1265 * still unconditionally validated below */
1266 if (!p_draw_info) {
1267 post_update_dirty_flags = ctx->dirty & ~(SWR_NEW_FRAMEBUFFER |
1268 SWR_NEW_RASTERIZER |
1269 SWR_NEW_VIEWPORT);
1270 ctx->dirty = 0;
1271 }
1272
1273 /* Scissor */
1274 if (ctx->dirty & SWR_NEW_SCISSOR) {
1275 ctx->api.pfnSwrSetScissorRects(ctx->swrContext, KNOB_NUM_VIEWPORTS_SCISSORS, ctx->swr_scissors);
1276 }
1277
1278 /* Set vertex & index buffers */
1279 if (ctx->dirty & SWR_NEW_VERTEX) {
1280 const struct pipe_draw_info &info = *p_draw_info;
1281
1282 /* vertex buffers */
1283 SWR_VERTEX_BUFFER_STATE swrVertexBuffers[PIPE_MAX_ATTRIBS];
1284 for (UINT i = 0; i < ctx->num_vertex_buffers; i++) {
1285 uint32_t size = 0, pitch = 0, elems = 0, partial_inbounds = 0;
1286 uint32_t min_vertex_index = 0;
1287 const uint8_t *p_data;
1288 struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
1289
1290 pitch = vb->stride;
1291 if (vb->is_user_buffer) {
1292 /* Client buffer
1293 * client memory is one-time use, re-trigger SWR_NEW_VERTEX to
1294 * revalidate on each draw */
1295 post_update_dirty_flags |= SWR_NEW_VERTEX;
1296
1297 uint32_t base;
1298 swr_user_vbuf_range(&info, ctx->velems, vb, i, &elems, &base, &size);
1299 partial_inbounds = 0;
1300 min_vertex_index = info.min_index + info.index_bias;
1301
1302 size = AlignUp(size, 4);
1303 /* If size of client memory copy is too large, don't copy. The
1304 * draw will access user-buffer directly and then block. This is
1305 * faster than queuing many large client draws. */
1306 if (size >= screen->client_copy_limit) {
1307 post_update_dirty_flags |= SWR_LARGE_CLIENT_DRAW;
1308 p_data = (const uint8_t *) vb->buffer.user;
1309 } else {
1310 /* Copy only needed vertices to scratch space */
1311 const void *ptr = (const uint8_t *) vb->buffer.user + base;
1312 ptr = (uint8_t *)swr_copy_to_scratch_space(
1313 ctx, &ctx->scratch->vertex_buffer, ptr, size);
1314 p_data = (const uint8_t *)ptr - base;
1315 }
1316 } else if (vb->buffer.resource) {
1317 /* VBO */
1318 if (!pitch) {
1319 /* If pitch is 0 (i.e. vb->stride == 0), the buffer contains a single
1320 * constant attribute. Use the stream_pitch which was
1321 * calculated during creation of vertex_elements_state for the
1322 * size of the attribute. */
1323 size = ctx->velems->stream_pitch[i];
1324 elems = 1;
1325 partial_inbounds = 0;
1326 min_vertex_index = 0;
1327 } else {
1328 /* size is based on buffer->width0 rather than info.max_index
1329 * to prevent having to validate VBO on each draw. */
1330 size = vb->buffer.resource->width0;
1331 elems = size / pitch;
1332 partial_inbounds = size % pitch;
1333 min_vertex_index = 0;
1334 }
1335
1336 p_data = swr_resource_data(vb->buffer.resource) + vb->buffer_offset;
1337 } else
1338 p_data = NULL;
1339
1340 swrVertexBuffers[i] = {0};
1341 swrVertexBuffers[i].index = i;
1342 swrVertexBuffers[i].pitch = pitch;
1343 swrVertexBuffers[i].xpData = (gfxptr_t) p_data;
1344 swrVertexBuffers[i].size = size;
1345 swrVertexBuffers[i].minVertex = min_vertex_index;
1346 swrVertexBuffers[i].maxVertex = elems;
1347 swrVertexBuffers[i].partialInboundsSize = partial_inbounds;
1348 }
1349
1350 ctx->api.pfnSwrSetVertexBuffers(
1351 ctx->swrContext, ctx->num_vertex_buffers, swrVertexBuffers);
1352
1353 /* index buffer, if required (info passed in by swr_draw_vbo) */
1354 SWR_FORMAT index_type = R32_UINT; /* Default for non-indexed draws */
1355 if (info.index_size) {
1356 const uint8_t *p_data;
1357 uint32_t size, pitch;
1358
1359 pitch = info.index_size ? info.index_size : sizeof(uint32_t);
1360 index_type = swr_convert_index_type(pitch);
1361
1362 if (!info.has_user_indices) {
1363 /* VBO
1364 * size is based on buffer->width0 rather than info.count
1365 * to prevent having to validate VBO on each draw */
1366 size = info.index.resource->width0;
1367 p_data = swr_resource_data(info.index.resource);
1368 } else {
1369 /* Client buffer
1370 * client memory is one-time use, re-trigger SWR_NEW_VERTEX to
1371 * revalidate on each draw */
1372 post_update_dirty_flags |= SWR_NEW_VERTEX;
1373
1374 size = info.count * pitch;
1375 size = AlignUp(size, 4);
1376 /* If size of client memory copy is too large, don't copy. The
1377 * draw will access user-buffer directly and then block. This is
1378 * faster than queuing many large client draws. */
1379 if (size >= screen->client_copy_limit) {
1380 post_update_dirty_flags |= SWR_LARGE_CLIENT_DRAW;
1381 p_data = (const uint8_t *) info.index.user;
1382 } else {
1383 /* Copy indices to scratch space */
1384 const void *ptr = info.index.user;
1385 ptr = swr_copy_to_scratch_space(
1386 ctx, &ctx->scratch->index_buffer, ptr, size);
1387 p_data = (const uint8_t *)ptr;
1388 }
1389 }
1390
1391 SWR_INDEX_BUFFER_STATE swrIndexBuffer;
1392 swrIndexBuffer.format = swr_convert_index_type(info.index_size);
1393 swrIndexBuffer.xpIndices = (gfxptr_t) p_data;
1394 swrIndexBuffer.size = size;
1395
1396 ctx->api.pfnSwrSetIndexBuffer(ctx->swrContext, &swrIndexBuffer);
1397 }
1398
1399 struct swr_vertex_element_state *velems = ctx->velems;
1400 if (velems && velems->fsState.indexType != index_type) {
1401 velems->fsFunc = NULL;
1402 velems->fsState.indexType = index_type;
1403 }
1404 }
1405
1406 /* GeometryShader */
1407 if (ctx->dirty & (SWR_NEW_GS |
1408 SWR_NEW_VS |
1409 SWR_NEW_SAMPLER |
1410 SWR_NEW_SAMPLER_VIEW)) {
1411 if (ctx->gs) {
1412 swr_jit_gs_key key;
1413 swr_generate_gs_key(key, ctx, ctx->gs);
1414 auto search = ctx->gs->map.find(key);
1415 PFN_GS_FUNC func;
1416 if (search != ctx->gs->map.end()) {
1417 func = search->second->shader;
1418 } else {
1419 func = swr_compile_gs(ctx, key);
1420 }
1421 ctx->api.pfnSwrSetGsFunc(ctx->swrContext, func);
1422
1423 /* JIT sampler state */
1424 if (ctx->dirty & SWR_NEW_SAMPLER) {
1425 swr_update_sampler_state(ctx,
1426 PIPE_SHADER_GEOMETRY,
1427 key.nr_samplers,
1428 ctx->swrDC.samplersGS);
1429 }
1430
1431 /* JIT sampler view state */
1432 if (ctx->dirty & (SWR_NEW_SAMPLER_VIEW | SWR_NEW_FRAMEBUFFER)) {
1433 swr_update_texture_state(ctx,
1434 PIPE_SHADER_GEOMETRY,
1435 key.nr_sampler_views,
1436 ctx->swrDC.texturesGS);
1437 }
1438
1439 ctx->api.pfnSwrSetGsState(ctx->swrContext, &ctx->gs->gsState);
1440 } else {
1441 SWR_GS_STATE state = { 0 };
1442 ctx->api.pfnSwrSetGsState(ctx->swrContext, &state);
1443 ctx->api.pfnSwrSetGsFunc(ctx->swrContext, NULL);
1444 }
1445 }
1446
1447 /* VertexShader */
1448 if (ctx->dirty & (SWR_NEW_VS |
1449 SWR_NEW_RASTERIZER | // for clip planes
1450 SWR_NEW_SAMPLER |
1451 SWR_NEW_SAMPLER_VIEW |
1452 SWR_NEW_FRAMEBUFFER)) {
1453 swr_jit_vs_key key;
1454 swr_generate_vs_key(key, ctx, ctx->vs);
1455 auto search = ctx->vs->map.find(key);
1456 PFN_VERTEX_FUNC func;
1457 if (search != ctx->vs->map.end()) {
1458 func = search->second->shader;
1459 } else {
1460 func = swr_compile_vs(ctx, key);
1461 }
1462 ctx->api.pfnSwrSetVertexFunc(ctx->swrContext, func);
1463
1464 /* JIT sampler state */
1465 if (ctx->dirty & SWR_NEW_SAMPLER) {
1466 swr_update_sampler_state(ctx,
1467 PIPE_SHADER_VERTEX,
1468 key.nr_samplers,
1469 ctx->swrDC.samplersVS);
1470 }
1471
1472 /* JIT sampler view state */
1473 if (ctx->dirty & (SWR_NEW_SAMPLER_VIEW | SWR_NEW_FRAMEBUFFER)) {
1474 swr_update_texture_state(ctx,
1475 PIPE_SHADER_VERTEX,
1476 key.nr_sampler_views,
1477 ctx->swrDC.texturesVS);
1478 }
1479 }
1480
1481 /* Work around the fact that poly stipple also affects lines and points,
1482 * since we rasterize them as triangles, too.
1483 * Has to be before the fragment shader, since it sets SWR_NEW_FS. */
1484 if (p_draw_info) {
1485 bool new_prim_is_poly =
1486 (u_reduced_prim(p_draw_info->mode) == PIPE_PRIM_TRIANGLES) &&
1487 (ctx->derived.rastState.fillMode == SWR_FILLMODE_SOLID);
1488 if (new_prim_is_poly != ctx->poly_stipple.prim_is_poly) {
1489 ctx->dirty |= SWR_NEW_FS;
1490 ctx->poly_stipple.prim_is_poly = new_prim_is_poly;
1491 }
1492 }
1493
1494 /* FragmentShader */
1495 if (ctx->dirty & (SWR_NEW_FS |
1496 SWR_NEW_VS |
1497 SWR_NEW_GS |
1498 SWR_NEW_RASTERIZER |
1499 SWR_NEW_SAMPLER |
1500 SWR_NEW_SAMPLER_VIEW |
1501 SWR_NEW_FRAMEBUFFER)) {
1502 swr_jit_fs_key key;
1503 swr_generate_fs_key(key, ctx, ctx->fs);
1504 auto search = ctx->fs->map.find(key);
1505 PFN_PIXEL_KERNEL func;
1506 if (search != ctx->fs->map.end()) {
1507 func = search->second->shader;
1508 } else {
1509 func = swr_compile_fs(ctx, key);
1510 }
1511 SWR_PS_STATE psState = {0};
1512 psState.pfnPixelShader = func;
1513 psState.killsPixel = ctx->fs->info.base.uses_kill;
1514 psState.inputCoverage = SWR_INPUT_COVERAGE_NORMAL;
1515 psState.writesODepth = ctx->fs->info.base.writes_z;
1516 psState.usesSourceDepth = ctx->fs->info.base.reads_z;
1517 psState.shadingRate = SWR_SHADING_RATE_PIXEL;
1518 psState.renderTargetMask = (1 << ctx->framebuffer.nr_cbufs) - 1;
1519 psState.posOffset = SWR_PS_POSITION_SAMPLE_NONE;
1520 uint32_t barycentricsMask = 0;
1521 #if 0
1522 // when we switch to mesa-master
1523 if (ctx->fs->info.base.uses_persp_center ||
1524 ctx->fs->info.base.uses_linear_center)
1525 barycentricsMask |= SWR_BARYCENTRIC_PER_PIXEL_MASK;
1526 if (ctx->fs->info.base.uses_persp_centroid ||
1527 ctx->fs->info.base.uses_linear_centroid)
1528 barycentricsMask |= SWR_BARYCENTRIC_CENTROID_MASK;
1529 if (ctx->fs->info.base.uses_persp_sample ||
1530 ctx->fs->info.base.uses_linear_sample)
1531 barycentricsMask |= SWR_BARYCENTRIC_PER_SAMPLE_MASK;
1532 #else
1533 for (unsigned i = 0; i < ctx->fs->info.base.num_inputs; i++) {
1534 switch (ctx->fs->info.base.input_interpolate_loc[i]) {
1535 case TGSI_INTERPOLATE_LOC_CENTER:
1536 barycentricsMask |= SWR_BARYCENTRIC_PER_PIXEL_MASK;
1537 break;
1538 case TGSI_INTERPOLATE_LOC_CENTROID:
1539 barycentricsMask |= SWR_BARYCENTRIC_CENTROID_MASK;
1540 break;
1541 case TGSI_INTERPOLATE_LOC_SAMPLE:
1542 barycentricsMask |= SWR_BARYCENTRIC_PER_SAMPLE_MASK;
1543 break;
1544 }
1545 }
1546 #endif
1547 psState.barycentricsMask = barycentricsMask;
1548 psState.usesUAV = false; // XXX
1549 psState.forceEarlyZ = false;
1550 ctx->api.pfnSwrSetPixelShaderState(ctx->swrContext, &psState);
1551
1552 /* JIT sampler state */
1553 if (ctx->dirty & (SWR_NEW_SAMPLER |
1554 SWR_NEW_FS)) {
1555 swr_update_sampler_state(ctx,
1556 PIPE_SHADER_FRAGMENT,
1557 key.nr_samplers,
1558 ctx->swrDC.samplersFS);
1559 }
1560
1561 /* JIT sampler view state */
1562 if (ctx->dirty & (SWR_NEW_SAMPLER_VIEW |
1563 SWR_NEW_FRAMEBUFFER |
1564 SWR_NEW_FS)) {
1565 swr_update_texture_state(ctx,
1566 PIPE_SHADER_FRAGMENT,
1567 key.nr_sampler_views,
1568 ctx->swrDC.texturesFS);
1569 }
1570 }
1571
1572
1573 /* VertexShader Constants */
1574 if (ctx->dirty & SWR_NEW_VSCONSTANTS) {
1575 swr_update_constants(ctx, PIPE_SHADER_VERTEX);
1576 }
1577
1578 /* FragmentShader Constants */
1579 if (ctx->dirty & SWR_NEW_FSCONSTANTS) {
1580 swr_update_constants(ctx, PIPE_SHADER_FRAGMENT);
1581 }
1582
1583 /* GeometryShader Constants */
1584 if (ctx->dirty & SWR_NEW_GSCONSTANTS) {
1585 swr_update_constants(ctx, PIPE_SHADER_GEOMETRY);
1586 }
1587
1588 /* Depth/stencil state */
1589 if (ctx->dirty & (SWR_NEW_DEPTH_STENCIL_ALPHA | SWR_NEW_FRAMEBUFFER)) {
1590 struct pipe_depth_state *depth = &(ctx->depth_stencil->depth);
1591 struct pipe_stencil_state *stencil = ctx->depth_stencil->stencil;
1592 SWR_DEPTH_STENCIL_STATE depthStencilState = {{0}};
1593 SWR_DEPTH_BOUNDS_STATE depthBoundsState = {0};
1594
1595 /* XXX, incomplete. Need to flesh out stencil & alpha test state
1596 struct pipe_stencil_state *front_stencil =
1597 ctx->depth_stencil.stencil[0];
1598 struct pipe_stencil_state *back_stencil = ctx->depth_stencil.stencil[1];
1599 struct pipe_alpha_state alpha;
1600 */
1601 if (stencil[0].enabled) {
1602 depthStencilState.stencilWriteEnable = 1;
1603 depthStencilState.stencilTestEnable = 1;
1604 depthStencilState.stencilTestFunc =
1605 swr_convert_depth_func(stencil[0].func);
1606
1607 depthStencilState.stencilPassDepthPassOp =
1608 swr_convert_stencil_op(stencil[0].zpass_op);
1609 depthStencilState.stencilPassDepthFailOp =
1610 swr_convert_stencil_op(stencil[0].zfail_op);
1611 depthStencilState.stencilFailOp =
1612 swr_convert_stencil_op(stencil[0].fail_op);
1613 depthStencilState.stencilWriteMask = stencil[0].writemask;
1614 depthStencilState.stencilTestMask = stencil[0].valuemask;
1615 depthStencilState.stencilRefValue = ctx->stencil_ref.ref_value[0];
1616 }
1617 if (stencil[1].enabled) {
1618 depthStencilState.doubleSidedStencilTestEnable = 1;
1619
1620 depthStencilState.backfaceStencilTestFunc =
1621 swr_convert_depth_func(stencil[1].func);
1622
1623 depthStencilState.backfaceStencilPassDepthPassOp =
1624 swr_convert_stencil_op(stencil[1].zpass_op);
1625 depthStencilState.backfaceStencilPassDepthFailOp =
1626 swr_convert_stencil_op(stencil[1].zfail_op);
1627 depthStencilState.backfaceStencilFailOp =
1628 swr_convert_stencil_op(stencil[1].fail_op);
1629 depthStencilState.backfaceStencilWriteMask = stencil[1].writemask;
1630 depthStencilState.backfaceStencilTestMask = stencil[1].valuemask;
1631
1632 depthStencilState.backfaceStencilRefValue =
1633 ctx->stencil_ref.ref_value[1];
1634 }
1635
1636 depthStencilState.depthTestEnable = depth->enabled;
1637 depthStencilState.depthTestFunc = swr_convert_depth_func(depth->func);
1638 depthStencilState.depthWriteEnable = depth->writemask;
1639 ctx->api.pfnSwrSetDepthStencilState(ctx->swrContext, &depthStencilState);
1640
1641 depthBoundsState.depthBoundsTestEnable = depth->bounds_test;
1642 depthBoundsState.depthBoundsTestMinValue = depth->bounds_min;
1643 depthBoundsState.depthBoundsTestMaxValue = depth->bounds_max;
1644 ctx->api.pfnSwrSetDepthBoundsState(ctx->swrContext, &depthBoundsState);
1645 }
1646
1647 /* Blend State */
1648 if (ctx->dirty & (SWR_NEW_BLEND |
1649 SWR_NEW_RASTERIZER |
1650 SWR_NEW_FRAMEBUFFER |
1651 SWR_NEW_DEPTH_STENCIL_ALPHA)) {
1652 struct pipe_framebuffer_state *fb = &ctx->framebuffer;
1653
1654 SWR_BLEND_STATE blendState;
1655 memcpy(&blendState, &ctx->blend->blendState, sizeof(blendState));
1656 blendState.constantColor[0] = ctx->blend_color.color[0];
1657 blendState.constantColor[1] = ctx->blend_color.color[1];
1658 blendState.constantColor[2] = ctx->blend_color.color[2];
1659 blendState.constantColor[3] = ctx->blend_color.color[3];
1660 blendState.alphaTestReference =
1661 *((uint32_t*)&ctx->depth_stencil->alpha.ref_value);
1662
1663 blendState.sampleMask = ctx->sample_mask;
1664 blendState.sampleCount = GetSampleCount(fb->samples);
1665
1666 /* If there are no color buffers bound, disable writes on RT0
1667 * and skip loop */
1668 if (fb->nr_cbufs == 0) {
1669 blendState.renderTarget[0].writeDisableRed = 1;
1670 blendState.renderTarget[0].writeDisableGreen = 1;
1671 blendState.renderTarget[0].writeDisableBlue = 1;
1672 blendState.renderTarget[0].writeDisableAlpha = 1;
1673 ctx->api.pfnSwrSetBlendFunc(ctx->swrContext, 0, NULL);
1674 }
1675 else
1676 for (int target = 0;
1677 target < std::min(SWR_NUM_RENDERTARGETS,
1678 PIPE_MAX_COLOR_BUFS);
1679 target++) {
1680 if (!fb->cbufs[target])
1681 continue;
1682
1683 struct swr_resource *colorBuffer =
1684 swr_resource(fb->cbufs[target]->texture);
1685
1686 BLEND_COMPILE_STATE compileState;
1687 memset(&compileState, 0, sizeof(compileState));
1688 compileState.format = colorBuffer->swr.format;
1689 memcpy(&compileState.blendState,
1690 &ctx->blend->compileState[target],
1691 sizeof(compileState.blendState));
1692
1693 const SWR_FORMAT_INFO& info = GetFormatInfo(compileState.format);
1694 if (compileState.blendState.logicOpEnable &&
1695 ((info.type[0] == SWR_TYPE_FLOAT) || info.isSRGB)) {
1696 compileState.blendState.logicOpEnable = false;
1697 }
1698
1699 if (info.type[0] == SWR_TYPE_SINT || info.type[0] == SWR_TYPE_UINT)
1700 compileState.blendState.blendEnable = false;
1701
1702 if (compileState.blendState.blendEnable == false &&
1703 compileState.blendState.logicOpEnable == false &&
1704 ctx->depth_stencil->alpha.enabled == 0) {
1705 ctx->api.pfnSwrSetBlendFunc(ctx->swrContext, target, NULL);
1706 continue;
1707 }
1708
1709 compileState.desc.alphaTestEnable =
1710 ctx->depth_stencil->alpha.enabled;
1711 compileState.desc.independentAlphaBlendEnable =
1712 (compileState.blendState.sourceBlendFactor !=
1713 compileState.blendState.sourceAlphaBlendFactor) ||
1714 (compileState.blendState.destBlendFactor !=
1715 compileState.blendState.destAlphaBlendFactor) ||
1716 (compileState.blendState.colorBlendFunc !=
1717 compileState.blendState.alphaBlendFunc);
1718 compileState.desc.alphaToCoverageEnable =
1719 ctx->blend->pipe.alpha_to_coverage;
1720 compileState.desc.sampleMaskEnable = (blendState.sampleMask != 0);
1721 compileState.desc.numSamples = fb->samples;
1722
1723 compileState.alphaTestFunction =
1724 swr_convert_depth_func(ctx->depth_stencil->alpha.func);
1725 compileState.alphaTestFormat = ALPHA_TEST_FLOAT32; // xxx
1726
1727 compileState.Canonicalize();
1728
1729 PFN_BLEND_JIT_FUNC func = NULL;
1730 auto search = ctx->blendJIT->find(compileState);
1731 if (search != ctx->blendJIT->end()) {
1732 func = search->second;
1733 } else {
1734 HANDLE hJitMgr = screen->hJitMgr;
1735 func = JitCompileBlend(hJitMgr, compileState);
1736 debug_printf("BLEND shader %p\n", func);
1737 assert(func && "Error: BlendShader = NULL");
1738
1739 ctx->blendJIT->insert(std::make_pair(compileState, func));
1740 }
1741 ctx->api.pfnSwrSetBlendFunc(ctx->swrContext, target, func);
1742 }
1743
1744 ctx->api.pfnSwrSetBlendState(ctx->swrContext, &blendState);
1745 }
1746
1747 if (ctx->dirty & SWR_NEW_STIPPLE) {
1748 swr_update_poly_stipple(ctx);
1749 }
1750
1751 if (ctx->dirty & (SWR_NEW_VS | SWR_NEW_SO | SWR_NEW_RASTERIZER)) {
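/* Rasterizer discard only disables rasterization; stream output still runs. */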
1752 ctx->vs->soState.rasterizerDisable =
1753 ctx->rasterizer->rasterizer_discard;
1754 ctx->api.pfnSwrSetSoState(ctx->swrContext, &ctx->vs->soState);
1755
1756 pipe_stream_output_info *stream_output = &ctx->vs->pipe.stream_output;
1757
1758 for (uint32_t i = 0; i < ctx->num_so_targets; i++) {
1759 SWR_STREAMOUT_BUFFER buffer = {0};
1760 if (!ctx->so_targets[i])
1761 continue;
1762 buffer.enable = true;
1763 buffer.pBuffer =
1764 (gfxptr_t)(swr_resource_data(ctx->so_targets[i]->buffer) +
1765 ctx->so_targets[i]->buffer_offset);
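/* SWR takes the size and pitch in dwords: buffer_size is in bytes (hence
 * the >> 2), while gallium's stream_output stride is already a dword count. */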
1766 buffer.bufferSize = ctx->so_targets[i]->buffer_size >> 2;
1767 buffer.pitch = stream_output->stride[i];
1768 buffer.streamOffset = 0;
1769
1770 ctx->api.pfnSwrSetSoBuffers(ctx->swrContext, &buffer, i);
1771 }
1772 }
1773
1774 if (ctx->dirty & (SWR_NEW_CLIP | SWR_NEW_RASTERIZER | SWR_NEW_VS)) {
1775 // A shader that exports clip distances overrides all user clip planes.
1776 if (ctx->rasterizer->clip_plane_enable &&
1777 !ctx->vs->info.base.num_written_clipdistance)
1778 {
1779 swr_draw_context *pDC = &ctx->swrDC;
1780 memcpy(pDC->userClipPlanes,
1781 ctx->clip.ucp,
1782 sizeof(pDC->userClipPlanes));
1783 }
1784 }
1785
1786 // set up backend state
1787 SWR_BACKEND_STATE backendState = {0};
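/* num_outputs includes the position output, which the backend does not count
 * as a generic attribute, hence the '- 1'. */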
1788 if (ctx->gs) {
1789 backendState.numAttributes = ctx->gs->info.base.num_outputs - 1;
1790 } else {
1791 backendState.numAttributes = ctx->vs->info.base.num_outputs - 1;
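/* If the FS reads gl_PrimitiveID, append one more attribute slot and use the
 * swizzle map to feed it from the PRIM_ID constant source rather than from a
 * VS output. */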
1792 if (ctx->fs->info.base.uses_primid) {
1793 backendState.numAttributes++;
1794 backendState.swizzleEnable = true;
1795 for (unsigned i = 0; i < sizeof(backendState.numComponents); i++) {
1796 backendState.swizzleMap[i].sourceAttrib = i;
1797 }
1798 backendState.swizzleMap[ctx->vs->info.base.num_outputs - 1].constantSource =
1799 SWR_CONSTANT_SOURCE_PRIM_ID;
1800 backendState.swizzleMap[ctx->vs->info.base.num_outputs - 1].componentOverrideMask = 1;
1801 }
1802 }
1803 if (ctx->rasterizer->sprite_coord_enable)
1804 backendState.numAttributes++;
1805
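/* Clamp to the number of attribute slots the backend state can describe;
 * numComponents has one entry per slot, so its size is the slot count. */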
1806 backendState.numAttributes = std::min((size_t)backendState.numAttributes,
1807 sizeof(backendState.numComponents));
1808 for (unsigned i = 0; i < backendState.numAttributes; i++)
1809 backendState.numComponents[i] = 4;
1810 backendState.constantInterpolationMask = ctx->fs->constantMask |
1811 (ctx->rasterizer->flatshade ? ctx->fs->flatConstantMask : 0);
1812 backendState.pointSpriteTexCoordMask = ctx->fs->pointSpriteMask;
1813
1814 struct tgsi_shader_info *pLastFE =
1815 ctx->gs ?
1816 &ctx->gs->info.base :
1817 &ctx->vs->info.base;
1818 backendState.readRenderTargetArrayIndex = pLastFE->writes_layer;
1819 backendState.readViewportArrayIndex = pLastFE->writes_viewport_index;
1820 backendState.vertexAttribOffset = VERTEX_ATTRIB_START_SLOT; // TODO: optimize
1821
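/* If the VS writes clip distances, clip only on those that are also enabled
 * as user clip planes; otherwise use the enabled user clip planes directly. */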
1822 backendState.clipDistanceMask =
1823 ctx->vs->info.base.num_written_clipdistance ?
1824 ctx->vs->info.base.clipdist_writemask & ctx->rasterizer->clip_plane_enable :
1825 ctx->rasterizer->clip_plane_enable;
1826
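/* Cull distances are packed after the clip distances in the clip/cull slot,
 * so shift their writemask past the written clip distances. */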
1827 backendState.cullDistanceMask =
1828 ctx->vs->info.base.culldist_writemask << ctx->vs->info.base.num_written_clipdistance;
1829
1830 // Assume old layout of SGV, POSITION, CLIPCULL, ATTRIB
1831 backendState.vertexClipCullOffset = backendState.vertexAttribOffset - 2;
1832
1833 ctx->api.pfnSwrSetBackendState(ctx->swrContext, &backendState);
1834
1835 /* Ensure that any StoreTiles still in progress from an attachment change have finished */
1836 if (swr_is_fence_pending(screen->flush_fence))
1837 swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
1838
1839 /* Finally, update the in-use status of all resources involved in the draw */
1840 swr_update_resource_status(pipe, p_draw_info);
1841
1842 ctx->dirty = post_update_dirty_flags;
1843 }
1844
1845
1846 static struct pipe_stream_output_target *
1847 swr_create_so_target(struct pipe_context *pipe,
1848 struct pipe_resource *buffer,
1849 unsigned buffer_offset,
1850 unsigned buffer_size)
1851 {
1852 struct pipe_stream_output_target *target;
1853
1854 target = CALLOC_STRUCT(pipe_stream_output_target);
1855 if (!target)
1856 return NULL;
1857
1858 target->context = pipe;
1859 target->reference.count = 1;
1860 pipe_resource_reference(&target->buffer, buffer);
1861 target->buffer_offset = buffer_offset;
1862 target->buffer_size = buffer_size;
1863 return target;
1864 }
1865
1866 static void
1867 swr_destroy_so_target(struct pipe_context *pipe,
1868 struct pipe_stream_output_target *target)
1869 {
1870 pipe_resource_reference(&target->buffer, NULL);
1871 FREE(target);
1872 }
1873
1874 static void
1875 swr_set_so_targets(struct pipe_context *pipe,
1876 unsigned num_targets,
1877 struct pipe_stream_output_target **targets,
1878 const unsigned *offsets)
1879 {
1880 struct swr_context *swr = swr_context(pipe);
1881 uint32_t i;
1882
1883 assert(num_targets <= MAX_SO_STREAMS);
1884
1885 for (i = 0; i < num_targets; i++) {
1886 pipe_so_target_reference(
1887 (struct pipe_stream_output_target **)&swr->so_targets[i],
1888 targets[i]);
1889 }
1890
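/* Unbind any targets left over from a previously larger set. */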
1891 for (/* fall-through */; i < swr->num_so_targets; i++) {
1892 pipe_so_target_reference(
1893 (struct pipe_stream_output_target **)&swr->so_targets[i], NULL);
1894 }
1895
1896 swr->num_so_targets = num_targets;
1897
1898 swr->dirty |= SWR_NEW_SO;
1899 }
1900
1901
1902 void
1903 swr_state_init(struct pipe_context *pipe)
1904 {
1905 pipe->create_blend_state = swr_create_blend_state;
1906 pipe->bind_blend_state = swr_bind_blend_state;
1907 pipe->delete_blend_state = swr_delete_blend_state;
1908
1909 pipe->create_depth_stencil_alpha_state = swr_create_depth_stencil_state;
1910 pipe->bind_depth_stencil_alpha_state = swr_bind_depth_stencil_state;
1911 pipe->delete_depth_stencil_alpha_state = swr_delete_depth_stencil_state;
1912
1913 pipe->create_rasterizer_state = swr_create_rasterizer_state;
1914 pipe->bind_rasterizer_state = swr_bind_rasterizer_state;
1915 pipe->delete_rasterizer_state = swr_delete_rasterizer_state;
1916
1917 pipe->create_sampler_state = swr_create_sampler_state;
1918 pipe->bind_sampler_states = swr_bind_sampler_states;
1919 pipe->delete_sampler_state = swr_delete_sampler_state;
1920
1921 pipe->create_sampler_view = swr_create_sampler_view;
1922 pipe->set_sampler_views = swr_set_sampler_views;
1923 pipe->sampler_view_destroy = swr_sampler_view_destroy;
1924
1925 pipe->create_vs_state = swr_create_vs_state;
1926 pipe->bind_vs_state = swr_bind_vs_state;
1927 pipe->delete_vs_state = swr_delete_vs_state;
1928
1929 pipe->create_fs_state = swr_create_fs_state;
1930 pipe->bind_fs_state = swr_bind_fs_state;
1931 pipe->delete_fs_state = swr_delete_fs_state;
1932
1933 pipe->create_gs_state = swr_create_gs_state;
1934 pipe->bind_gs_state = swr_bind_gs_state;
1935 pipe->delete_gs_state = swr_delete_gs_state;
1936
1937 pipe->set_constant_buffer = swr_set_constant_buffer;
1938
1939 pipe->create_vertex_elements_state = swr_create_vertex_elements_state;
1940 pipe->bind_vertex_elements_state = swr_bind_vertex_elements_state;
1941 pipe->delete_vertex_elements_state = swr_delete_vertex_elements_state;
1942
1943 pipe->set_vertex_buffers = swr_set_vertex_buffers;
1944
1945 pipe->set_polygon_stipple = swr_set_polygon_stipple;
1946 pipe->set_clip_state = swr_set_clip_state;
1947 pipe->set_scissor_states = swr_set_scissor_states;
1948 pipe->set_viewport_states = swr_set_viewport_states;
1949
1950 pipe->set_framebuffer_state = swr_set_framebuffer_state;
1951
1952 pipe->set_blend_color = swr_set_blend_color;
1953 pipe->set_stencil_ref = swr_set_stencil_ref;
1954
1955 pipe->set_sample_mask = swr_set_sample_mask;
1956 pipe->get_sample_position = swr_get_sample_position;
1957
1958 pipe->create_stream_output_target = swr_create_so_target;
1959 pipe->stream_output_target_destroy = swr_destroy_so_target;
1960 pipe->set_stream_output_targets = swr_set_so_targets;
1961 }