draw: stop using CULLDIST semantic.
[mesa.git] src/gallium/auxiliary/draw/draw_context.c
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "pipe/p_context.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "draw_context.h"
#include "draw_pipe.h"
#include "draw_prim_assembler.h"
#include "draw_vs.h"
#include "draw_gs.h"

#if HAVE_LLVM
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"

boolean
draw_get_option_use_llvm(void)
{
   return debug_get_bool_option("DRAW_USE_LLVM", TRUE);
}
#else
boolean
draw_get_option_use_llvm(void)
{
   return FALSE;
}
#endif


/**
 * Create new draw module context with gallivm state for LLVM JIT.
 */
static struct draw_context *
draw_create_context(struct pipe_context *pipe, void *context,
                    boolean try_llvm)
{
   struct draw_context *draw = CALLOC_STRUCT( draw_context );
   if (!draw)
      goto err_out;

   /* we need correct cpu caps for disabling denorms in draw_vbo() */
   util_cpu_detect();

#if HAVE_LLVM
   if (try_llvm && draw_get_option_use_llvm()) {
      draw->llvm = draw_llvm_create(draw, (LLVMContextRef)context);
   }
#endif

   draw->pipe = pipe;

   if (!draw_init(draw))
      goto err_destroy;

   draw->ia = draw_prim_assembler_create(draw);
   if (!draw->ia)
      goto err_destroy;

   return draw;

err_destroy:
   draw_destroy( draw );
err_out:
   return NULL;
}


/**
 * Create new draw module context, with LLVM JIT.
 */
struct draw_context *
draw_create(struct pipe_context *pipe)
{
   return draw_create_context(pipe, NULL, TRUE);
}


#if HAVE_LLVM
struct draw_context *
draw_create_with_llvm_context(struct pipe_context *pipe,
                              void *context)
{
   return draw_create_context(pipe, context, TRUE);
}
#endif

/**
 * Create a new draw context, without LLVM JIT.
 */
struct draw_context *
draw_create_no_llvm(struct pipe_context *pipe)
{
   return draw_create_context(pipe, NULL, FALSE);
}
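
/*
 * Illustrative sketch (not part of the original file): a software driver
 * would typically create and wire up a draw context roughly like this.
 * "my_pipe", "my_vbuf_render" and "my_rasterize_stage" are hypothetical
 * driver-side objects; the draw_* calls are the ones defined in this file.
 *
 *    struct draw_context *draw = draw_create(my_pipe);
 *    if (!draw)
 *       return NULL;
 *    draw_set_render(draw, my_vbuf_render);
 *    draw_set_rasterize_stage(draw, my_rasterize_stage);
 *
 * Drivers that cannot (or do not want to) use the LLVM paths would call
 * draw_create_no_llvm() instead of draw_create().
 */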


boolean draw_init(struct draw_context *draw)
{
   /*
    * Note that several functions compute the clipmask of the predefined
    * formats with hardcoded formulas instead of using these. So modifications
    * here must be reflected there too.
    */

   ASSIGN_4V( draw->plane[0], -1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[1],  1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[2],  0, -1,  0, 1 );
   ASSIGN_4V( draw->plane[3],  0,  1,  0, 1 );
   ASSIGN_4V( draw->plane[4],  0,  0,  1, 1 ); /* yes these are correct */
   ASSIGN_4V( draw->plane[5],  0,  0, -1, 1 ); /* mesa's a bit wonky */
   draw->clip_xy = TRUE;
   draw->clip_z = TRUE;

   draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);
   draw->pt.user.eltMax = ~0;

   if (!draw_pipeline_init( draw ))
      return FALSE;

   if (!draw_pt_init( draw ))
      return FALSE;

   if (!draw_vs_init( draw ))
      return FALSE;

   if (!draw_gs_init( draw ))
      return FALSE;

   draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
      draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);

   draw->floating_point_depth = false;

   return TRUE;
}

/*
 * Called whenever we're starting to draw a new instance.
 * Some internal structures don't want to have to reset internal
 * members on each invocation (because their state might have to persist
 * between multiple primitive restart rendering calls) but might have to
 * for each new instance.
 * This is particularly the case for primitive IDs in the geometry shader.
 */
void draw_new_instance(struct draw_context *draw)
{
   draw_geometry_shader_new_instance(draw->gs.geometry_shader);
   draw_prim_assembler_new_instance(draw->ia);
}


void draw_destroy( struct draw_context *draw )
{
   struct pipe_context *pipe;
   unsigned i, j;

   if (!draw)
      return;

   pipe = draw->pipe;

   /* free any rasterizer CSOs that we may have created.
    */
   for (i = 0; i < 2; i++) {
      for (j = 0; j < 2; j++) {
         if (draw->rasterizer_no_cull[i][j]) {
            pipe->delete_rasterizer_state(pipe, draw->rasterizer_no_cull[i][j]);
         }
      }
   }

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
   }

   /* Not so fast -- we're just borrowing this at the moment.
    *
   if (draw->render)
      draw->render->destroy( draw->render );
   */

   draw_prim_assembler_destroy(draw->ia);
   draw_pipeline_destroy( draw );
   draw_pt_destroy( draw );
   draw_vs_destroy( draw );
   draw_gs_destroy( draw );
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_destroy( draw->llvm );
#endif

   FREE( draw );
}



void draw_flush( struct draw_context *draw )
{
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );
}


/**
 * Specify the depth stencil format for the draw pipeline. This function
 * determines the Minimum Resolvable Depth factor for polygon offset.
 * This factor potentially depends on the number of Z buffer bits,
 * the rasterization algorithm and the arithmetic performed on Z
 * values between vertex shading and rasterization.
 */
void draw_set_zs_format(struct draw_context *draw, enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);

   draw->floating_point_depth =
      (util_get_depth_format_type(desc) == UTIL_FORMAT_TYPE_FLOAT);

   draw->mrd = util_get_depth_format_mrd(desc);
}


static bool
draw_is_vs_window_space(struct draw_context *draw)
{
   if (draw->vs.vertex_shader) {
      struct tgsi_shader_info *info = &draw->vs.vertex_shader->info;

      return info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] != 0;
   }
   return false;
}


void
draw_update_clip_flags(struct draw_context *draw)
{
   bool window_space = draw_is_vs_window_space(draw);

   draw->clip_xy = !draw->driver.bypass_clip_xy && !window_space;
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip) &&
                  !window_space;
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0 &&
                     !window_space;
   draw->guard_band_points_xy = draw->guard_band_xy ||
                                (draw->driver.bypass_clip_points &&
                                 (draw->rasterizer &&
                                  draw->rasterizer->point_tri_clip));
}


void
draw_update_viewport_flags(struct draw_context *draw)
{
   bool window_space = draw_is_vs_window_space(draw);

   draw->bypass_viewport = window_space || draw->identity_viewport;
}


/**
 * Register new primitive rasterization/rendering state.
 * This causes the drawing pipeline to be rebuilt.
 */
void draw_set_rasterizer_state( struct draw_context *draw,
                                const struct pipe_rasterizer_state *raster,
                                void *rast_handle )
{
   if (!draw->suspend_flushing) {
      draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

      draw->rasterizer = raster;
      draw->rast_handle = rast_handle;
      draw_update_clip_flags(draw);
   }
}

/* With a little more work, llvmpipe will be able to turn this off and
 * do its own x/y clipping.
 *
 * Some hardware can turn off clipping altogether - in particular any
 * hardware with a TNL unit can do its own clipping, even if it is
 * relying on the draw module for some other reason.
 * Setting bypass_clip_points to achieve d3d-style point clipping (the driver
 * will need to do the "vp scissoring") _requires_ the driver to implement
 * wide points / point sprites itself (points will still be clipped if
 * rasterizer point_tri_clip isn't set).  Only relevant if bypass_clip_xy
 * isn't set.
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy,
                               boolean bypass_clip_points)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   draw->driver.bypass_clip_points = bypass_clip_points;
   draw_update_clip_flags(draw);
}
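
/*
 * Illustrative sketch (an assumption, not from the original file): a driver
 * whose rasterizer has a guard band but otherwise relies on draw for all
 * clipping might configure the flags like this.  The guard band only takes
 * effect when x/y clipping isn't bypassed (see draw_update_clip_flags()).
 *
 *    draw_set_driver_clipping(draw,
 *                             FALSE,   // bypass_clip_xy: let draw clip x/y
 *                             FALSE,   // bypass_clip_z:  let draw clip z
 *                             TRUE,    // guard_band_xy:  rasterizer has a guard band
 *                             FALSE);  // bypass_clip_points
 */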


/**
 * Plug in the primitive rendering/rasterization stage (which is the last
 * stage in the drawing pipeline).
 * This is provided by the device driver.
 */
void draw_set_rasterize_stage( struct draw_context *draw,
                               struct draw_stage *stage )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->pipeline.rasterize = stage;
}


/**
 * Set the draw module's clipping state.
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}


/**
 * Set the draw module's viewport state.
 */
void draw_set_viewport_states( struct draw_context *draw,
                               unsigned start_slot,
                               unsigned num_viewports,
                               const struct pipe_viewport_state *vps )
{
   const struct pipe_viewport_state *viewport = vps;
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
   debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);

   memcpy(draw->viewports + start_slot, vps,
          sizeof(struct pipe_viewport_state) * num_viewports);

   draw->identity_viewport = (num_viewports == 1) &&
      (viewport->scale[0] == 1.0f &&
       viewport->scale[1] == 1.0f &&
       viewport->scale[2] == 1.0f &&
       viewport->translate[0] == 0.0f &&
       viewport->translate[1] == 0.0f &&
       viewport->translate[2] == 0.0f);
   draw_update_viewport_flags(draw);
}


void
draw_set_vertex_buffers(struct draw_context *draw,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   assert(start_slot + count <= PIPE_MAX_ATTRIBS);

   util_set_vertex_buffers_count(draw->pt.vertex_buffer,
                                 &draw->pt.nr_vertex_buffers,
                                 buffers, start_slot, count);
}


void
draw_set_vertex_elements(struct draw_context *draw,
                         unsigned count,
                         const struct pipe_vertex_element *elements)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   /* We could improve this by only flushing the frontend and the fetch part
    * of the middle. This would avoid recalculating the emit keys.
    */
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
   draw->pt.nr_vertex_elements = count;
}


/**
 * Tell drawing context where to find mapped vertex buffers.
 */
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
                              unsigned attr, const void *buffer,
                              size_t size)
{
   draw->pt.user.vbuffer[attr].map = buffer;
   draw->pt.user.vbuffer[attr].size = size;
}
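
/*
 * Illustrative sketch (an assumption, not from the original file): a driver
 * binds the vertex buffer state first and then tells draw where the buffer
 * contents are mapped in memory.  "vb", "mapped_ptr" and "mapped_size" are
 * hypothetical driver-side values.
 *
 *    struct pipe_vertex_buffer vb;
 *    // ... fill in vb ...
 *    draw_set_vertex_buffers(draw, 0, 1, &vb);
 *    draw_set_mapped_vertex_buffer(draw, 0, mapped_ptr, mapped_size);
 */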


void
draw_set_mapped_constant_buffer(struct draw_context *draw,
                                unsigned shader_type,
                                unsigned slot,
                                const void *buffer,
                                unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);

   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_constants[slot] = buffer;
      draw->pt.user.vs_constants_size[slot] = size;
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_constants[slot] = buffer;
      draw->pt.user.gs_constants_size[slot] = size;
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
   }
}
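
/*
 * Illustrative sketch (an assumption, not from the original file): mapping a
 * vertex shader constant buffer into slot 0.  "constants" and
 * "constants_size" are hypothetical driver-side values; the memory must stay
 * valid for as long as draw may read from it.
 *
 *    draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
 *                                    constants, constants_size);
 */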


/**
 * Tells the draw module to draw points with triangles if their size
 * is greater than this threshold.
 */
void
draw_wide_point_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_threshold = threshold;
}


/**
 * Should the draw module handle point->quad conversion for drawing sprites?
 */
void
draw_wide_point_sprites(struct draw_context *draw, boolean draw_sprite)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_sprites = draw_sprite;
}


/**
 * Tells the draw module to draw lines with triangles if their width
 * is greater than this threshold.
 */
void
draw_wide_line_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_line_threshold = roundf(threshold);
}


/**
 * Tells the draw module whether or not to implement line stipple.
 */
void
draw_enable_line_stipple(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.line_stipple = enable;
}


/**
 * Tells draw module whether to convert points to quads for sprite mode.
 */
void
draw_enable_point_sprites(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.point_sprite = enable;
}


void
draw_set_force_passthrough( struct draw_context *draw, boolean enable )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->force_passthrough = enable;
}


/**
 * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
 * exist already.
 *
 * This is used by some of the optional draw module stages such
 * as wide_point which may need to allocate additional generic/texcoord
 * attributes.
 */
int
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
                               uint semantic_name, uint semantic_index)
{
   int slot;
   uint num_outputs;
   uint n;

   slot = draw_find_shader_output(draw, semantic_name, semantic_index);
   if (slot >= 0) {
      return slot;
   }

   num_outputs = draw_current_shader_outputs(draw);
   n = draw->extra_shader_outputs.num;

   assert(n < ARRAY_SIZE(draw->extra_shader_outputs.semantic_name));

   draw->extra_shader_outputs.semantic_name[n] = semantic_name;
   draw->extra_shader_outputs.semantic_index[n] = semantic_index;
   draw->extra_shader_outputs.slot[n] = num_outputs + n;
   draw->extra_shader_outputs.num++;

   return draw->extra_shader_outputs.slot[n];
}
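
/*
 * Illustrative sketch (an assumption, not from the original file): an
 * optional pipeline stage that needs somewhere to write generated texcoords
 * could grab an extra generic output like this; the returned slot is then
 * used when emitting vertices.  "extra_semantic_index" is a hypothetical
 * index chosen by the stage.
 *
 *    int slot = draw_alloc_extra_vertex_attrib(draw,
 *                                              TGSI_SEMANTIC_GENERIC,
 *                                              extra_semantic_index);
 */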


/**
 * Remove all extra vertex attributes that were allocated with
 * draw_alloc_extra_vertex_attrib().
 */
void
draw_remove_extra_vertex_attribs(struct draw_context *draw)
{
   draw->extra_shader_outputs.num = 0;
}


/**
 * If a geometry shader is present, return its info, else the vertex shader's
 * info.
 */
struct tgsi_shader_info *
draw_get_shader_info(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader) {
      return &draw->gs.geometry_shader->info;
   } else {
      return &draw->vs.vertex_shader->info;
   }
}

/**
 * Prepare output slots from the draw module.
 *
 * Certain parts of the draw module can emit additional outputs that
 * are quite useful to the backends.  A good example is the process of
 * decomposing primitives into wireframes (i.e. lines), which would
 * normally lose the face-side information; with this mechanism we can
 * inject another shader output that passes the original face-side
 * information through to the backend.
 */
void
draw_prepare_shader_outputs(struct draw_context *draw)
{
   draw_remove_extra_vertex_attribs(draw);
   draw_prim_assembler_prepare_outputs(draw->ia);
   draw_unfilled_prepare_outputs(draw, draw->pipeline.unfilled);
   if (draw->pipeline.aapoint)
      draw_aapoint_prepare_outputs(draw, draw->pipeline.aapoint);
   if (draw->pipeline.aaline)
      draw_aaline_prepare_outputs(draw, draw->pipeline.aaline);
}

/**
 * Ask the draw module for the location/slot of the given vertex attribute in
 * a post-transformed vertex.
 *
 * With this function, drivers that use the draw module should have no reason
 * to track the current vertex/geometry shader.
 *
 * Note that the draw module may sometimes generate vertices with extra
 * attributes (such as texcoords for AA lines).  The driver can call this
 * function to find those attributes.
 *
 * -1 is returned if the attribute is not found, since this is
 * an undefined situation.  Note that zero is a valid slot for any of the
 * attributes, because position is not required to be attribute 0 or even
 * present at all.
 */
int
draw_find_shader_output(const struct draw_context *draw,
                        uint semantic_name, uint semantic_index)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint i;

   for (i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic_name[i] == semantic_name &&
          info->output_semantic_index[i] == semantic_index)
         return i;
   }

   /* Search the extra vertex attributes */
   for (i = 0; i < draw->extra_shader_outputs.num; i++) {
      if (draw->extra_shader_outputs.semantic_name[i] == semantic_name &&
          draw->extra_shader_outputs.semantic_index[i] == semantic_index) {
         return draw->extra_shader_outputs.slot[i];
      }
   }

   return -1;
}
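
/*
 * Illustrative sketch (an assumption, not from the original file): a driver
 * locating the post-transform position and primary color slots before
 * building its vertex-emit layout.  A negative return value means the
 * attribute simply isn't there.
 *
 *    int pos_slot   = draw_find_shader_output(draw, TGSI_SEMANTIC_POSITION, 0);
 *    int color_slot = draw_find_shader_output(draw, TGSI_SEMANTIC_COLOR, 0);
 *    if (color_slot < 0) {
 *       // the shader doesn't write a color; skip that attribute
 *    }
 */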


/**
 * Return the total number of shader outputs.  This function is similar to
 * draw_current_shader_outputs() but it also counts any extra
 * vertex/geometry output attributes that may be filled in by some draw
 * stages (such as AA point, AA line).
 *
 * If a geometry shader is present, its outputs are counted;
 * otherwise the vertex shader's are used.
 */
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint count;

   count = info->num_outputs;
   count += draw->extra_shader_outputs.num;

   return count;
}


/**
 * Return the total number of vertex shader outputs.  This function
 * also counts any extra vertex output attributes that may
 * be filled in by some draw stages (such as AA point, AA line,
 * front face).
 */
uint
draw_total_vs_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = &draw->vs.vertex_shader->info;

   return info->num_outputs + draw->extra_shader_outputs.num;
}

/**
 * Return the total number of geometry shader outputs.  This function
 * also counts any extra geometry output attributes that may
 * be filled in by some draw stages (such as AA point, AA line, front
 * face).
 */
uint
draw_total_gs_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info;

   if (!draw->gs.geometry_shader)
      return 0;

   info = &draw->gs.geometry_shader->info;

   return info->num_outputs + draw->extra_shader_outputs.num;
}


/**
 * Provide TGSI sampler objects for vertex/geometry shaders that use
 * texture fetches.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_texture_sampler(struct draw_context *draw,
                     uint shader,
                     struct tgsi_sampler *sampler)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.sampler = sampler;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.sampler = sampler;
   }
}

/**
 * Provide TGSI image objects for vertex/geometry shaders that use
 * image loads/stores.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_image(struct draw_context *draw,
           uint shader,
           struct tgsi_image *image)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.image = image;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.image = image;
   }
}

/**
 * Provide TGSI buffer objects for vertex/geometry shaders that use
 * load/store/atomic ops.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_buffer(struct draw_context *draw,
            uint shader,
            struct tgsi_buffer *buffer)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.buffer = buffer;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.buffer = buffer;
   }
}


void draw_set_render( struct draw_context *draw,
                      struct vbuf_render *render )
{
   draw->render = render;
}


/**
 * Tell the draw module where vertex indexes/elements are located, and
 * their size (in bytes).
 *
 * Note: the caller must apply the pipe_index_buffer::offset value to
 * the address.  The draw module doesn't do that.
 */
void
draw_set_indexes(struct draw_context *draw,
                 const void *elements, unsigned elem_size,
                 unsigned elem_buffer_space)
{
   assert(elem_size == 0 ||
          elem_size == 1 ||
          elem_size == 2 ||
          elem_size == 4);
   draw->pt.user.elts = elements;
   draw->pt.user.eltSizeIB = elem_size;
   if (elem_size)
      draw->pt.user.eltMax = elem_buffer_space / elem_size;
   else
      draw->pt.user.eltMax = 0;
}
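
/*
 * Illustrative sketch (an assumption, not from the original file): since
 * draw does not apply pipe_index_buffer::offset itself, the caller adds it
 * to the mapped pointer before handing the indices over.  "ib", "ib_map" and
 * "ib_size" are hypothetical driver-side values.
 *
 *    draw_set_indexes(draw,
 *                     (const ubyte *) ib_map + ib->offset,
 *                     ib->index_size,
 *                     ib_size - ib->offset);
 */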


/* Revamp me please:
 */
void draw_do_flush( struct draw_context *draw, unsigned flags )
{
   if (!draw->suspend_flushing)
   {
      assert(!draw->flushing); /* catch inadvertent recursion */

      draw->flushing = TRUE;

      draw_pipeline_flush( draw, flags );

      draw_pt_flush( draw, flags );

      draw->flushing = FALSE;
   }
}


/**
 * Return the number of output attributes produced by the geometry
 * shader, if present.  If no geometry shader, return the number of
 * outputs from the vertex shader.
 * \sa draw_num_shader_outputs
 */
uint
draw_current_shader_outputs(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.num_gs_outputs;
   return draw->vs.num_vs_outputs;
}


/**
 * Return the index of the shader output which will contain the
 * vertex position.
 */
uint
draw_current_shader_position_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.position_output;
}


/**
 * Return the index of the shader output which will contain the
 * viewport index.
 */
uint
draw_current_shader_viewport_index_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->viewport_index_output;
   return draw->vs.vertex_shader->viewport_index_output;
}

/**
 * Returns true if the geometry shader (or, if none is bound, the
 * vertex shader) writes out a viewport index.
 */
boolean
draw_current_shader_uses_viewport_index(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.writes_viewport_index;
   return draw->vs.vertex_shader->info.writes_viewport_index;
}


/**
 * Return the index of the shader output which will contain the
 * clip vertex position.
 * Note that we don't support a clipvertex output in the gs; hence,
 * for clipping to work correctly, we return the ordinary position
 * output instead.
 */
uint
draw_current_shader_clipvertex_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.clipvertex_output;
}

/**
 * Return the index of the shader output holding the given element of the
 * combined clip/cull distance array.
 */
uint
draw_current_shader_ccdistance_output(const struct draw_context *draw, int index)
{
   debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->ccdistance_output[index];
   return draw->vs.ccdistance_output[index];
}


uint
draw_current_shader_num_written_clipdistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_clipdistance;
   return draw->vs.vertex_shader->info.num_written_clipdistance;
}

uint
draw_current_shader_num_written_culldistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_culldistance;
   return draw->vs.vertex_shader->info.num_written_culldistance;
}

/**
 * Return a pointer/handle for a driver/CSO rasterizer object which
 * disables culling, stippling, unfilled tris, etc.
 * This is used by some pipeline stages (such as wide_point, aa_line
 * and aa_point) which convert points/lines into triangles.  In those
 * cases we don't want to accidentally cull the triangles.
 *
 * \param scissor  should the rasterizer state enable scissoring?
 * \param flatshade  should the rasterizer state use flat shading?
 * \return  rasterizer CSO handle
 */
void *
draw_get_rasterizer_no_cull( struct draw_context *draw,
                             boolean scissor,
                             boolean flatshade )
{
   if (!draw->rasterizer_no_cull[scissor][flatshade]) {
      /* create now */
      struct pipe_context *pipe = draw->pipe;
      struct pipe_rasterizer_state rast;

      memset(&rast, 0, sizeof(rast));
      rast.scissor = scissor;
      rast.flatshade = flatshade;
      rast.front_ccw = 1;
      rast.half_pixel_center = draw->rasterizer->half_pixel_center;
      rast.bottom_edge_rule = draw->rasterizer->bottom_edge_rule;
      rast.clip_halfz = draw->rasterizer->clip_halfz;

      draw->rasterizer_no_cull[scissor][flatshade] =
         pipe->create_rasterizer_state(pipe, &rast);
   }
   return draw->rasterizer_no_cull[scissor][flatshade];
}
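
/*
 * Illustrative sketch (an assumption, not from the original file): a
 * pipeline stage that turns wide lines into triangles might bind the
 * no-cull rasterizer around its own rendering and restore the application
 * state afterwards (typically with suspend_flushing set so the bind itself
 * doesn't trigger a flush).
 *
 *    void *no_cull = draw_get_rasterizer_no_cull(draw,
 *                                                rast->scissor,
 *                                                rast->flatshade);
 *    pipe->bind_rasterizer_state(pipe, no_cull);
 *    // ... emit the generated triangles ...
 *    pipe->bind_rasterizer_state(pipe, draw->rast_handle);
 */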

void
draw_set_mapped_so_targets(struct draw_context *draw,
                           int num_targets,
                           struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS])
{
   int i;

   for (i = 0; i < num_targets; i++)
      draw->so.targets[i] = targets[i];
   for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
      draw->so.targets[i] = NULL;

   draw->so.num_targets = num_targets;
}

void
draw_set_sampler_views(struct draw_context *draw,
                       unsigned shader_stage,
                       struct pipe_sampler_view **views,
                       unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->sampler_views[shader_stage][i] = views[i];
   for (i = num; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i)
      draw->sampler_views[shader_stage][i] = NULL;

   draw->num_sampler_views[shader_stage] = num;
}

void
draw_set_samplers(struct draw_context *draw,
                  unsigned shader_stage,
                  struct pipe_sampler_state **samplers,
                  unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->samplers[shader_stage][i] = samplers[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->samplers[shader_stage][i] = NULL;

   draw->num_samplers[shader_stage] = num;

#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_sampler_state(draw, shader_stage);
#endif
}

void
draw_set_mapped_texture(struct draw_context *draw,
                        unsigned shader_stage,
                        unsigned sview_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        const void *base_ptr,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t mip_offsets[PIPE_MAX_TEXTURE_LEVELS])
{
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_mapped_texture(draw,
                                   shader_stage,
                                   sview_idx,
                                   width, height, depth, first_level,
                                   last_level, base_ptr,
                                   row_stride, img_stride, mip_offsets);
#endif
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS may be misleading
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param_no_llvm(unsigned shader, enum pipe_shader_cap param)
{
   switch(shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return tgsi_exec_get_shader_param(param);
   default:
      return 0;
   }
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS may be misleading
 * because there are two different ways of setting textures, and drivers
 * typically only support one.  Drivers requesting a draw context explicitly
 * without llvm must call draw_get_shader_param_no_llvm instead.
 */
int
draw_get_shader_param(unsigned shader, enum pipe_shader_cap param)
{
#ifdef HAVE_LLVM
   if (draw_get_option_use_llvm()) {
      switch(shader) {
      case PIPE_SHADER_VERTEX:
      case PIPE_SHADER_GEOMETRY:
         return gallivm_get_shader_param(param);
      default:
         return 0;
      }
   }
#endif

   return draw_get_shader_param_no_llvm(shader, param);
}

/**
 * Enables or disables collection of statistics.
 *
 * The draw module is capable of generating statistics for the vertex
 * processing pipeline.  Collecting that data isn't free, so it is
 * disabled by default.  Users of the module can enable (or disable)
 * this functionality through this function.
 * The actual data is emitted through the VBUF interface,
 * via the 'pipeline_statistics' callback, to be exact.
 */
void
draw_collect_pipeline_statistics(struct draw_context *draw,
                                 boolean enable)
{
   draw->collect_statistics = enable;
}

/**
 * Computes clipper invocation statistics.
 *
 * Figures out how many primitives would have been
 * sent to the clipper given the specified
 * prim info data.
 */
void
draw_stats_clipper_primitives(struct draw_context *draw,
                              const struct draw_prim_info *prim_info)
{
   if (draw->collect_statistics) {
      unsigned i;
      for (i = 0; i < prim_info->primitive_count; i++) {
         draw->statistics.c_invocations +=
            u_decomposed_prims_for_vertices(prim_info->prim,
                                            prim_info->primitive_lengths[i]);
      }
   }
}


/**
 * Returns true if the draw module will inject the frontface
 * info into the outputs.
 *
 * Given the current primitive and rasterizer state,
 * the function figures out whether the draw module
 * will inject the front-face information into the shader
 * outputs.  This is done to preserve the front-facing
 * info when decomposing primitives into wireframes.
 */
boolean
draw_will_inject_frontface(const struct draw_context *draw)
{
   unsigned reduced_prim = u_reduced_prim(draw->pt.prim);
   const struct pipe_rasterizer_state *rast = draw->rasterizer;

   if (reduced_prim != PIPE_PRIM_TRIANGLES) {
      return FALSE;
   }

   return (rast &&
           (rast->fill_front != PIPE_POLYGON_MODE_FILL ||
            rast->fill_back != PIPE_POLYGON_MODE_FILL));
}