/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "pipe/p_context.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "draw_context.h"
#include "draw_pipe.h"
#include "draw_prim_assembler.h"
#include "draw_vs.h"
#include "draw_gs.h"

#ifdef LLVM_AVAILABLE
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"

boolean
draw_get_option_use_llvm(void)
{
   return debug_get_bool_option("DRAW_USE_LLVM", TRUE);
}
#else
boolean
draw_get_option_use_llvm(void)
{
   return FALSE;
}
#endif
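
/*
 * Usage note: since the option above is read with debug_get_bool_option(),
 * the LLVM paths can be disabled at run time from the environment, for
 * example (hypothetical invocation):
 *
 *    DRAW_USE_LLVM=0 ./some_gallium_app
 *
 * This is mainly useful for comparing against the non-LLVM (TGSI
 * interpreter) code paths while debugging.
 */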


/**
 * Create a new draw module context, optionally with gallivm state for the
 * LLVM JIT.
 */
static struct draw_context *
draw_create_context(struct pipe_context *pipe, void *context,
                    boolean try_llvm)
{
   struct draw_context *draw = CALLOC_STRUCT( draw_context );
   if (!draw)
      goto err_out;

   /* we need correct cpu caps for disabling denorms in draw_vbo() */
   util_cpu_detect();

#ifdef LLVM_AVAILABLE
   if (try_llvm && draw_get_option_use_llvm()) {
      draw->llvm = draw_llvm_create(draw, (LLVMContextRef)context);
   }
#endif

   draw->pipe = pipe;

   if (!draw_init(draw))
      goto err_destroy;

   draw->ia = draw_prim_assembler_create(draw);
   if (!draw->ia)
      goto err_destroy;

   return draw;

err_destroy:
   draw_destroy( draw );
err_out:
   return NULL;
}


/**
 * Create a new draw module context, with LLVM JIT.
 */
struct draw_context *
draw_create(struct pipe_context *pipe)
{
   return draw_create_context(pipe, NULL, TRUE);
}


#ifdef LLVM_AVAILABLE
struct draw_context *
draw_create_with_llvm_context(struct pipe_context *pipe,
                              void *context)
{
   return draw_create_context(pipe, context, TRUE);
}
#endif

/**
 * Create a new draw context, without LLVM JIT.
 */
struct draw_context *
draw_create_no_llvm(struct pipe_context *pipe)
{
   return draw_create_context(pipe, NULL, FALSE);
}
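
/*
 * Typical driver usage (sketch only; the names below are hypothetical and
 * not part of this module): a software driver creates one draw context per
 * pipe_context and destroys it together with that context.
 *
 *    struct my_context {
 *       struct pipe_context base;
 *       struct draw_context *draw;
 *    };
 *
 *    ...at context creation:
 *       mctx->draw = draw_create(&mctx->base);
 *       if (!mctx->draw)
 *          goto fail;
 *
 *    ...at context destruction:
 *       draw_destroy(mctx->draw);
 */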


boolean draw_init(struct draw_context *draw)
{
   /*
    * Note that several functions compute the clipmask of the predefined
    * formats with hardcoded formulas instead of using these. So modifications
    * here must be reflected there too.
    */

   ASSIGN_4V( draw->plane[0], -1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[1],  1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[2],  0, -1,  0, 1 );
   ASSIGN_4V( draw->plane[3],  0,  1,  0, 1 );
   ASSIGN_4V( draw->plane[4],  0,  0,  1, 1 ); /* yes these are correct */
   ASSIGN_4V( draw->plane[5],  0,  0, -1, 1 ); /* mesa's a bit wonky */
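   /*
    * I.e. a clip-space point p = (x, y, z, w) is inside plane[i] when
    * dot(plane[i], p) >= 0, so the six planes above correspond to
    * x <= w, x >= -w, y <= w, y >= -w, z >= -w and z <= w respectively.
    */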
   draw->clip_xy = TRUE;
   draw->clip_z = TRUE;

   draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);
   draw->pt.user.eltMax = ~0;

   if (!draw_pipeline_init( draw ))
      return FALSE;

   if (!draw_pt_init( draw ))
      return FALSE;

   if (!draw_vs_init( draw ))
      return FALSE;

   if (!draw_gs_init( draw ))
      return FALSE;

   draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
      draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);

   draw->floating_point_depth = false;

   return TRUE;
}

/*
 * Called whenever we're starting to draw a new instance.
 * Some internal structures don't want to have to reset internal
 * members on each invocation (because their state might have to persist
 * across multiple primitive-restart rendering calls) but do have to reset
 * them for each new instance.
 * This is particularly the case for primitive IDs in geometry shaders.
 */
void draw_new_instance(struct draw_context *draw)
{
   draw_geometry_shader_new_instance(draw->gs.geometry_shader);
   draw_prim_assembler_new_instance(draw->ia);
}


void draw_destroy( struct draw_context *draw )
{
   struct pipe_context *pipe;
   unsigned i, j;

   if (!draw)
      return;

   pipe = draw->pipe;

   /* free any rasterizer CSOs that we may have created.
    */
   for (i = 0; i < 2; i++) {
      for (j = 0; j < 2; j++) {
         if (draw->rasterizer_no_cull[i][j]) {
            pipe->delete_rasterizer_state(pipe, draw->rasterizer_no_cull[i][j]);
         }
      }
   }

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++)
      pipe_vertex_buffer_unreference(&draw->pt.vertex_buffer[i]);

   /* Not so fast -- we're just borrowing this at the moment.
    *
   if (draw->render)
      draw->render->destroy( draw->render );
   */

   draw_prim_assembler_destroy(draw->ia);
   draw_pipeline_destroy( draw );
   draw_pt_destroy( draw );
   draw_vs_destroy( draw );
   draw_gs_destroy( draw );
#ifdef LLVM_AVAILABLE
   if (draw->llvm)
      draw_llvm_destroy( draw->llvm );
#endif

   FREE( draw );
}


void draw_flush( struct draw_context *draw )
{
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );
}


/**
 * Specify the depth stencil format for the draw pipeline. This function
 * determines the Minimum Resolvable Depth factor for polygon offset.
 * This factor potentially depends on the number of Z buffer bits,
 * the rasterization algorithm and the arithmetic performed on Z
 * values between vertex shading and rasterization.
 */
void draw_set_zs_format(struct draw_context *draw, enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);

   draw->floating_point_depth =
      (util_get_depth_format_type(desc) == UTIL_FORMAT_TYPE_FLOAT);

   draw->mrd = util_get_depth_format_mrd(desc);
}


static bool
draw_is_vs_window_space(struct draw_context *draw)
{
   if (draw->vs.vertex_shader) {
      struct tgsi_shader_info *info = &draw->vs.vertex_shader->info;

      return info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] != 0;
   }
   return false;
}


void
draw_update_clip_flags(struct draw_context *draw)
{
   bool window_space = draw_is_vs_window_space(draw);

   draw->clip_xy = !draw->driver.bypass_clip_xy && !window_space;
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip_near) &&
                  !window_space;
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0 &&
                     !window_space;
   draw->guard_band_points_xy = draw->guard_band_xy ||
                                (draw->driver.bypass_clip_points &&
                                 (draw->rasterizer &&
                                  draw->rasterizer->point_tri_clip));
}


void
draw_update_viewport_flags(struct draw_context *draw)
{
   bool window_space = draw_is_vs_window_space(draw);

   draw->bypass_viewport = window_space || draw->identity_viewport;
}


/**
 * Register new primitive rasterization/rendering state.
 * This causes the drawing pipeline to be rebuilt.
 */
void draw_set_rasterizer_state( struct draw_context *draw,
                                const struct pipe_rasterizer_state *raster,
                                void *rast_handle )
{
   if (!draw->suspend_flushing) {
      draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

      draw->rasterizer = raster;
      draw->rast_handle = rast_handle;
      draw_update_clip_flags(draw);
   }
}

/* With a little more work, llvmpipe will be able to turn this off and
 * do its own x/y clipping.
 *
 * Some hardware can turn off clipping altogether - in particular any
 * hardware with a TNL unit can do its own clipping, even if it is
 * relying on the draw module for some other reason.
 * Setting bypass_clip_points to achieve d3d-style point clipping (the driver
 * will need to do the "vp scissoring") _requires_ the driver to implement
 * wide points / point sprites itself (points will still be clipped if
 * rasterizer point_tri_clip isn't set). Only relevant if bypass_clip_xy
 * isn't set.
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy,
                               boolean bypass_clip_points)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   draw->driver.bypass_clip_points = bypass_clip_points;
   draw_update_clip_flags(draw);
}
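
/*
 * Example (illustrative only; the values are hypothetical): hardware that
 * performs its own x/y and z clipping but has no guard band and no
 * d3d-style point clipping might configure the module once at startup with
 *
 *    draw_set_driver_clipping(draw, TRUE, TRUE, FALSE, FALSE);
 *
 * i.e. bypass_clip_xy = TRUE, bypass_clip_z = TRUE, guard_band_xy = FALSE,
 * bypass_clip_points = FALSE.
 */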


/**
 * Plug in the primitive rendering/rasterization stage (which is the last
 * stage in the drawing pipeline).
 * This is provided by the device driver.
 */
void draw_set_rasterize_stage( struct draw_context *draw,
                               struct draw_stage *stage )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->pipeline.rasterize = stage;
}


/**
 * Set the draw module's clipping state.
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}


/**
 * Set the draw module's viewport state.
 */
void draw_set_viewport_states( struct draw_context *draw,
                               unsigned start_slot,
                               unsigned num_viewports,
                               const struct pipe_viewport_state *vps )
{
   const struct pipe_viewport_state *viewport = vps;
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
   debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);

   memcpy(draw->viewports + start_slot, vps,
          sizeof(struct pipe_viewport_state) * num_viewports);

   draw->identity_viewport = (num_viewports == 1) &&
      (viewport->scale[0] == 1.0f &&
       viewport->scale[1] == 1.0f &&
       viewport->scale[2] == 1.0f &&
       viewport->translate[0] == 0.0f &&
       viewport->translate[1] == 0.0f &&
       viewport->translate[2] == 0.0f);
   draw_update_viewport_flags(draw);
}



void
draw_set_vertex_buffers(struct draw_context *draw,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   assert(start_slot + count <= PIPE_MAX_ATTRIBS);

   util_set_vertex_buffers_count(draw->pt.vertex_buffer,
                                 &draw->pt.nr_vertex_buffers,
                                 buffers, start_slot, count);
}


void
draw_set_vertex_elements(struct draw_context *draw,
                         unsigned count,
                         const struct pipe_vertex_element *elements)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   /* We could improve this by only flushing the frontend and the fetch part
    * of the middle. This would avoid recalculating the emit keys. */
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
   draw->pt.nr_vertex_elements = count;
}


/**
 * Tell drawing context where to find mapped vertex buffers.
 */
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
                              unsigned attr, const void *buffer,
                              size_t size)
{
   draw->pt.user.vbuffer[attr].map = buffer;
   draw->pt.user.vbuffer[attr].size = size;
}
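
/*
 * Sketch of how a driver typically feeds this (hypothetical code; the
 * mapping calls are driver specific): before software vertex processing it
 * maps each bound vertex buffer and hands the CPU pointer to the module,
 *
 *    for (i = 0; i < nr_vertex_buffers; i++) {
 *       const void *map = ...map vertex buffer i...;
 *       draw_set_mapped_vertex_buffer(draw, i, map, size_in_bytes);
 *    }
 *
 * and unmaps the buffers again once the draw call has been flushed.
 */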


void
draw_set_mapped_constant_buffer(struct draw_context *draw,
                                enum pipe_shader_type shader_type,
                                unsigned slot,
                                const void *buffer,
                                unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);

   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_constants[slot] = buffer;
      draw->pt.user.vs_constants_size[slot] = size;
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_constants[slot] = buffer;
      draw->pt.user.gs_constants_size[slot] = size;
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
   }
}
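
/*
 * Example (sketch): binding constant buffer 0 for the vertex shader, where
 * mapped_ptr and size_in_bytes are hypothetical names for whatever the
 * driver obtained when mapping its pipe_constant_buffer:
 *
 *    draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
 *                                    mapped_ptr, size_in_bytes);
 */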

void
draw_set_mapped_shader_buffer(struct draw_context *draw,
                              enum pipe_shader_type shader_type,
                              unsigned slot,
                              const void *buffer,
                              unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_SHADER_BUFFERS);

   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_ssbos[slot] = buffer;
      draw->pt.user.vs_ssbos_size[slot] = size;
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_ssbos[slot] = buffer;
      draw->pt.user.gs_ssbos_size[slot] = size;
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_shader_buffer");
   }
}

/**
 * Tells the draw module to draw points with triangles if their size
 * is greater than this threshold.
 */
void
draw_wide_point_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_threshold = threshold;
}


/**
 * Should the draw module handle point->quad conversion for drawing sprites?
 */
void
draw_wide_point_sprites(struct draw_context *draw, boolean draw_sprite)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_sprites = draw_sprite;
}


/**
 * Tells the draw module to draw lines with triangles if their width
 * is greater than this threshold.
 */
void
draw_wide_line_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_line_threshold = roundf(threshold);
}


/**
 * Tells the draw module whether or not to implement line stipple.
 */
void
draw_enable_line_stipple(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.line_stipple = enable;
}


/**
 * Tells draw module whether to convert points to quads for sprite mode.
 */
void
draw_enable_point_sprites(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.point_sprite = enable;
}


void
draw_set_force_passthrough( struct draw_context *draw, boolean enable )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->force_passthrough = enable;
}


/**
 * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
 * exist already.
 *
 * This is used by some of the optional draw module stages such
 * as wide_point which may need to allocate additional generic/texcoord
 * attributes.
 */
int
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
                               uint semantic_name, uint semantic_index)
{
   int slot;
   uint num_outputs;
   uint n;

   slot = draw_find_shader_output(draw, semantic_name, semantic_index);
   if (slot >= 0) {
      return slot;
   }

   num_outputs = draw_current_shader_outputs(draw);
   n = draw->extra_shader_outputs.num;

   assert(n < ARRAY_SIZE(draw->extra_shader_outputs.semantic_name));

   draw->extra_shader_outputs.semantic_name[n] = semantic_name;
   draw->extra_shader_outputs.semantic_index[n] = semantic_index;
   draw->extra_shader_outputs.slot[n] = num_outputs + n;
   draw->extra_shader_outputs.num++;

   return draw->extra_shader_outputs.slot[n];
}
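
/*
 * Example (sketch, mirroring what stages such as wide_point do): request an
 * extra generic output for sprite texcoords and remember the slot it lands
 * in:
 *
 *    int slot = draw_alloc_extra_vertex_attrib(draw,
 *                                              TGSI_SEMANTIC_GENERIC, 0);
 *
 * Asking again for the same (semantic name, index) pair returns the
 * existing slot rather than allocating a new one.
 */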


/**
 * Remove all extra vertex attributes that were allocated with
 * draw_alloc_extra_vertex_attrib().
 */
void
draw_remove_extra_vertex_attribs(struct draw_context *draw)
{
   draw->extra_shader_outputs.num = 0;
}


/**
 * If a geometry shader is present, return its info, else the vertex shader's
 * info.
 */
struct tgsi_shader_info *
draw_get_shader_info(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader) {
      return &draw->gs.geometry_shader->info;
   } else {
      return &draw->vs.vertex_shader->info;
   }
}

/**
 * Prepare output slots from the draw module.
 *
 * Certain parts of the draw module can emit additional outputs that are
 * quite useful to the backends. A good example is decomposing primitives
 * into wireframes (i.e. lines), which would normally lose the face-side
 * information; with this mechanism we can inject another shader output
 * that passes the original face-side information through to the backend.
 */
void
draw_prepare_shader_outputs(struct draw_context *draw)
{
   draw_remove_extra_vertex_attribs(draw);
   draw_prim_assembler_prepare_outputs(draw->ia);
   draw_unfilled_prepare_outputs(draw, draw->pipeline.unfilled);
   if (draw->pipeline.aapoint)
      draw_aapoint_prepare_outputs(draw, draw->pipeline.aapoint);
   if (draw->pipeline.aaline)
      draw_aaline_prepare_outputs(draw, draw->pipeline.aaline);
}

/**
 * Ask the draw module for the location/slot of the given vertex attribute in
 * a post-transformed vertex.
 *
 * With this function, drivers that use the draw module should have no reason
 * to track the current vertex/geometry shader.
 *
 * Note that the draw module may sometimes generate vertices with extra
 * attributes (such as texcoords for AA lines). The driver can call this
 * function to find those attributes.
 *
 * -1 is returned if the attribute is not found since this is
 * an undefined situation. Note that zero is a valid slot and can be
 * used by any of the attributes, because the position is not
 * required to be attribute 0 or even present at all.
 */
int
draw_find_shader_output(const struct draw_context *draw,
                        uint semantic_name, uint semantic_index)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint i;

   for (i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic_name[i] == semantic_name &&
          info->output_semantic_index[i] == semantic_index)
         return i;
   }

   /* Search the extra vertex attributes */
   for (i = 0; i < draw->extra_shader_outputs.num; i++) {
      if (draw->extra_shader_outputs.semantic_name[i] == semantic_name &&
          draw->extra_shader_outputs.semantic_index[i] == semantic_index) {
         return draw->extra_shader_outputs.slot[i];
      }
   }

   return -1;
}
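
/*
 * Example (sketch): a vbuf backend locating the point-size output, if the
 * current shader writes one:
 *
 *    int psize_slot = draw_find_shader_output(draw, TGSI_SEMANTIC_PSIZE, 0);
 *    if (psize_slot >= 0)
 *       ...emit per-vertex point size from that slot...
 */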


/**
 * Return the total number of shader outputs.  This function is similar to
 * draw_current_shader_outputs() but it also counts any extra
 * vertex/geometry output attributes that may be filled in by some draw
 * stages (such as AA point, AA line).
 *
 * If a geometry shader is present, its outputs are counted;
 * otherwise the vertex shader's are used.
 */
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint count;

   count = info->num_outputs;
   count += draw->extra_shader_outputs.num;

   return count;
}


/**
 * Return the total number of vertex shader outputs.  This function
 * also counts any extra vertex output attributes that may
 * be filled in by some draw stages (such as AA point, AA line,
 * front face).
 */
uint
draw_total_vs_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = &draw->vs.vertex_shader->info;

   return info->num_outputs + draw->extra_shader_outputs.num;
}

/**
 * Return the total number of geometry shader outputs.  This function
 * also counts any extra geometry output attributes that may
 * be filled in by some draw stages (such as AA point, AA line, front
 * face).
 */
uint
draw_total_gs_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info;

   if (!draw->gs.geometry_shader)
      return 0;

   info = &draw->gs.geometry_shader->info;

   return info->num_outputs + draw->extra_shader_outputs.num;
}


/**
 * Provide TGSI sampler objects for vertex/geometry shaders that use
 * texture fetches. This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_texture_sampler(struct draw_context *draw,
                     enum pipe_shader_type shader,
                     struct tgsi_sampler *sampler)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.sampler = sampler;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.sampler = sampler;
   }
}

/**
 * Provide TGSI image objects for vertex/geometry shaders that use
 * image accesses. This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_image(struct draw_context *draw,
           enum pipe_shader_type shader,
           struct tgsi_image *image)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.image = image;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.image = image;
   }
}

/**
 * Provide TGSI buffer objects for vertex/geometry shaders that use
 * load/store/atomic ops. This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_buffer(struct draw_context *draw,
            enum pipe_shader_type shader,
            struct tgsi_buffer *buffer)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.buffer = buffer;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.buffer = buffer;
   }
}


void draw_set_render( struct draw_context *draw,
                      struct vbuf_render *render )
{
   draw->render = render;
}


/**
 * Tell the draw module where vertex indexes/elements are located, and
 * their size (in bytes).
 */
void
draw_set_indexes(struct draw_context *draw,
                 const void *elements, unsigned elem_size,
                 unsigned elem_buffer_space)
{
   assert(elem_size == 0 ||
          elem_size == 1 ||
          elem_size == 2 ||
          elem_size == 4);
   draw->pt.user.elts = elements;
   draw->pt.user.eltSizeIB = elem_size;
   if (elem_size)
      draw->pt.user.eltMax = elem_buffer_space / elem_size;
   else
      draw->pt.user.eltMax = 0;
}
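
/*
 * Example (sketch): pointing the module at a mapped 16-bit index buffer of
 * ib_size bytes,
 *
 *    draw_set_indexes(draw, mapped_ib, 2, ib_size);
 *
 * or, for non-indexed draws, clearing it again:
 *
 *    draw_set_indexes(draw, NULL, 0, 0);
 */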


/* Revamp me please:
 */
void draw_do_flush( struct draw_context *draw, unsigned flags )
{
   if (!draw->suspend_flushing)
   {
      assert(!draw->flushing); /* catch inadvertent recursion */

      draw->flushing = TRUE;

      draw_pipeline_flush( draw, flags );

      draw_pt_flush( draw, flags );

      draw->flushing = FALSE;
   }
}


/**
 * Return the number of output attributes produced by the geometry
 * shader, if present.  If no geometry shader, return the number of
 * outputs from the vertex shader.
 * \sa draw_num_shader_outputs
 */
uint
draw_current_shader_outputs(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.num_gs_outputs;
   return draw->vs.num_vs_outputs;
}


/**
 * Return the index of the shader output which will contain the
 * vertex position.
 */
uint
draw_current_shader_position_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.position_output;
}


/**
 * Return the index of the shader output which will contain the
 * viewport index.
 */
uint
draw_current_shader_viewport_index_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->viewport_index_output;
   return draw->vs.vertex_shader->viewport_index_output;
}

/**
 * Returns true if the geometry shader (if one is bound), or otherwise the
 * vertex shader, writes out a viewport index.
 */
boolean
draw_current_shader_uses_viewport_index(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.writes_viewport_index;
   return draw->vs.vertex_shader->info.writes_viewport_index;
}


/**
 * Return the index of the shader output which will contain the
 * clip vertex position.
 * Note that we don't support a clipvertex output in the GS; hence, for
 * clipping to work correctly, we return the ordinary position output
 * instead.
 */
uint
draw_current_shader_clipvertex_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.clipvertex_output;
}

uint
draw_current_shader_ccdistance_output(const struct draw_context *draw, int index)
{
   debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->ccdistance_output[index];
   return draw->vs.ccdistance_output[index];
}


uint
draw_current_shader_num_written_clipdistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_clipdistance;
   return draw->vs.vertex_shader->info.num_written_clipdistance;
}

uint
draw_current_shader_num_written_culldistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_culldistance;
   return draw->vs.vertex_shader->info.num_written_culldistance;
}

/**
 * Return a pointer/handle for a driver/CSO rasterizer object which
 * disables culling, stippling, unfilled tris, etc.
 * This is used by some pipeline stages (such as wide_point, aa_line
 * and aa_point) which convert points/lines into triangles.  In those
 * cases we don't want to accidentally cull the triangles.
 *
 * \param scissor  should the rasterizer state enable scissoring?
 * \param flatshade  should the rasterizer state use flat shading?
 * \return  rasterizer CSO handle
 */
void *
draw_get_rasterizer_no_cull( struct draw_context *draw,
                             boolean scissor,
                             boolean flatshade )
{
   if (!draw->rasterizer_no_cull[scissor][flatshade]) {
      /* create now */
      struct pipe_context *pipe = draw->pipe;
      struct pipe_rasterizer_state rast;

      memset(&rast, 0, sizeof(rast));
      rast.scissor = scissor;
      rast.flatshade = flatshade;
      rast.front_ccw = 1;
      rast.half_pixel_center = draw->rasterizer->half_pixel_center;
      rast.bottom_edge_rule = draw->rasterizer->bottom_edge_rule;
      rast.clip_halfz = draw->rasterizer->clip_halfz;

      draw->rasterizer_no_cull[scissor][flatshade] =
         pipe->create_rasterizer_state(pipe, &rast);
   }
   return draw->rasterizer_no_cull[scissor][flatshade];
}
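
/*
 * Sketch of how pipeline stages use this (illustrative, adapted from the
 * aa_line/aa_point pattern rather than copied verbatim): bind the no-cull
 * rasterizer while emitting generated triangles, with flushing suspended so
 * the bind itself doesn't trigger a recursive state-change flush:
 *
 *    void *no_cull = draw_get_rasterizer_no_cull(draw, rast->scissor,
 *                                                rast->flatshade);
 *    draw->suspend_flushing = TRUE;
 *    pipe->bind_rasterizer_state(pipe, no_cull);
 *    ...draw the generated triangles...
 *    pipe->bind_rasterizer_state(pipe, draw->rast_handle);
 *    draw->suspend_flushing = FALSE;
 */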

void
draw_set_mapped_so_targets(struct draw_context *draw,
                           int num_targets,
                           struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS])
{
   int i;

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num_targets; i++)
      draw->so.targets[i] = targets[i];
   for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
      draw->so.targets[i] = NULL;

   draw->so.num_targets = num_targets;
}

void
draw_set_sampler_views(struct draw_context *draw,
                       enum pipe_shader_type shader_stage,
                       struct pipe_sampler_view **views,
                       unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->sampler_views[shader_stage][i] = views[i];
   for (i = num; i < draw->num_sampler_views[shader_stage]; ++i)
      draw->sampler_views[shader_stage][i] = NULL;

   draw->num_sampler_views[shader_stage] = num;
}

void
draw_set_samplers(struct draw_context *draw,
                  enum pipe_shader_type shader_stage,
                  struct pipe_sampler_state **samplers,
                  unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->samplers[shader_stage][i] = samplers[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->samplers[shader_stage][i] = NULL;

   draw->num_samplers[shader_stage] = num;

#ifdef LLVM_AVAILABLE
   if (draw->llvm)
      draw_llvm_set_sampler_state(draw, shader_stage);
#endif
}

void
draw_set_images(struct draw_context *draw,
                enum pipe_shader_type shader_stage,
                struct pipe_image_view *views,
                unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SHADER_IMAGES);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->images[shader_stage][i] = &views[i];
   for (i = num; i < draw->num_images[shader_stage]; ++i)
      draw->images[shader_stage][i] = NULL;

   draw->num_images[shader_stage] = num;
}

void
draw_set_mapped_texture(struct draw_context *draw,
                        enum pipe_shader_type shader_stage,
                        unsigned sview_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        const void *base_ptr,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t mip_offsets[PIPE_MAX_TEXTURE_LEVELS])
{
#ifdef LLVM_AVAILABLE
   if (draw->llvm)
      draw_llvm_set_mapped_texture(draw,
                                   shader_stage,
                                   sview_idx,
                                   width, height, depth, first_level,
                                   last_level, base_ptr,
                                   row_stride, img_stride, mip_offsets);
#endif
}

void
draw_set_mapped_image(struct draw_context *draw,
                      enum pipe_shader_type shader_stage,
                      unsigned idx,
                      uint32_t width, uint32_t height, uint32_t depth,
                      const void *base_ptr,
                      uint32_t row_stride,
                      uint32_t img_stride)
{
#ifdef LLVM_AVAILABLE
   if (draw->llvm)
      draw_llvm_set_mapped_image(draw,
                                 shader_stage,
                                 idx,
                                 width, height, depth,
                                 base_ptr,
                                 row_stride, img_stride);
#endif
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param_no_llvm(enum pipe_shader_type shader,
                              enum pipe_shader_cap param)
{
   switch(shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return tgsi_exec_get_shader_param(param);
   default:
      return 0;
   }
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 * Drivers requesting a draw context explicitly without llvm must call
 * draw_get_shader_param_no_llvm instead.
 */
int
draw_get_shader_param(enum pipe_shader_type shader, enum pipe_shader_cap param)
{
#ifdef LLVM_AVAILABLE
   if (draw_get_option_use_llvm()) {
      switch(shader) {
      case PIPE_SHADER_VERTEX:
      case PIPE_SHADER_GEOMETRY:
         return gallivm_get_shader_param(param);
      default:
         return 0;
      }
   }
#endif

   return draw_get_shader_param_no_llvm(shader, param);
}

/**
 * Enables or disables collection of statistics.
 *
 * The draw module is capable of generating statistics for the vertex
 * processing pipeline.  Collecting that data isn't free, so it's
 * disabled by default.  Users of the module can enable (or disable)
 * this functionality through this function.
 * The actual data is emitted through the VBUF interface,
 * the 'pipeline_statistics' callback to be exact.
 */
void
draw_collect_pipeline_statistics(struct draw_context *draw,
                                 boolean enable)
{
   draw->collect_statistics = enable;
}
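
/*
 * Sketch: a driver implementing PIPE_QUERY_PIPELINE_STATISTICS in software
 * would enable collection while such a query is active,
 *
 *    draw_collect_pipeline_statistics(draw, TRUE);
 *
 * and receive the accumulated counters through the 'pipeline_statistics'
 * callback of its vbuf_render implementation (see draw_vbuf.h).
 */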

/**
 * Computes clipper invocation statistics.
 *
 * Figures out how many primitives would have been
 * sent to the clipper given the specified
 * prim info data.
 */
void
draw_stats_clipper_primitives(struct draw_context *draw,
                              const struct draw_prim_info *prim_info)
{
   if (draw->collect_statistics) {
      unsigned i;
      for (i = 0; i < prim_info->primitive_count; i++) {
         draw->statistics.c_invocations +=
            u_decomposed_prims_for_vertices(prim_info->prim,
                                            prim_info->primitive_lengths[i]);
      }
   }
}


/**
 * Returns true if the draw module will inject the frontface
 * info into the outputs.
 *
 * Given the specified primitive and rasterizer state
 * the function will figure out if the draw module
 * will inject the front-face information into shader
 * outputs. This is done to preserve the front-facing
 * info when decomposing primitives into wireframes.
 */
boolean
draw_will_inject_frontface(const struct draw_context *draw)
{
   unsigned reduced_prim = u_reduced_prim(draw->pt.prim);
   const struct pipe_rasterizer_state *rast = draw->rasterizer;

   if (reduced_prim != PIPE_PRIM_TRIANGLES) {
      return FALSE;
   }

   return (rast &&
           (rast->fill_front != PIPE_POLYGON_MODE_FILL ||
            rast->fill_back != PIPE_POLYGON_MODE_FILL));
}