1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
27 #include "util/u_format.h"
28 #include "util/u_helpers.h"
29 #include "util/u_inlines.h"
30 #include "util/u_prim.h"
31 #include "util/u_prim_restart.h"
32 #include "util/u_time.h"
33 #include "util/u_upload_mgr.h"
34 #include "indices/u_indices.h"
36 #include "svga_hw_reg.h"
38 #include "svga_context.h"
39 #include "svga_screen.h"
40 #include "svga_draw.h"
41 #include "svga_shader.h"
42 #include "svga_state.h"
43 #include "svga_surface.h"
44 #include "svga_swtnl.h"
45 #include "svga_debug.h"
46 #include "svga_resource_buffer.h"
48 /* Returns TRUE if we are currently using flat shading.
51 is_using_flat_shading(const struct svga_context
*svga
)
54 svga
->state
.hw_draw
.fs
? svga
->state
.hw_draw
.fs
->uses_flat_interp
: FALSE
;
58 static enum pipe_error
59 retry_draw_range_elements( struct svga_context
*svga
,
60 struct pipe_resource
*index_buffer
,
65 enum pipe_prim_type prim
,
68 unsigned start_instance
,
69 unsigned instance_count
,
72 enum pipe_error ret
= PIPE_OK
;
74 SVGA_STATS_TIME_PUSH(svga_sws(svga
), SVGA_STATS_TIME_DRAWELEMENTS
);
76 svga_hwtnl_set_fillmode(svga
->hwtnl
, svga
->curr
.rast
->hw_fillmode
);
78 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
82 /** determine if flatshade is to be used after svga_update_state()
83 * in case the fragment shader is changed.
85 svga_hwtnl_set_flatshade(svga
->hwtnl
,
86 svga
->curr
.rast
->templ
.flatshade
||
87 is_using_flat_shading(svga
),
88 svga
->curr
.rast
->templ
.flatshade_first
);
90 ret
= svga_hwtnl_draw_range_elements( svga
->hwtnl
,
91 index_buffer
, index_size
, index_bias
,
94 start_instance
, instance_count
);
101 svga_context_flush( svga
, NULL
);
105 ret
= retry_draw_range_elements(svga
,
106 index_buffer
, index_size
, index_bias
,
107 min_index
, max_index
,
109 start_instance
, instance_count
, FALSE
);
113 SVGA_STATS_TIME_POP(svga_sws(svga
));
118 static enum pipe_error
119 retry_draw_arrays( struct svga_context
*svga
,
120 enum pipe_prim_type prim
, unsigned start
, unsigned count
,
121 unsigned start_instance
, unsigned instance_count
,
126 SVGA_STATS_TIME_PUSH(svga_sws(svga
), SVGA_STATS_TIME_DRAWARRAYS
);
128 svga_hwtnl_set_fillmode(svga
->hwtnl
, svga
->curr
.rast
->hw_fillmode
);
130 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
134 /** determine if flatshade is to be used after svga_update_state()
135 * in case the fragment shader is changed.
137 svga_hwtnl_set_flatshade(svga
->hwtnl
,
138 svga
->curr
.rast
->templ
.flatshade
||
139 is_using_flat_shading(svga
),
140 svga
->curr
.rast
->templ
.flatshade_first
);
142 ret
= svga_hwtnl_draw_arrays(svga
->hwtnl
, prim
, start
, count
,
143 start_instance
, instance_count
);
150 if (ret
== PIPE_ERROR_OUT_OF_MEMORY
&& do_retry
)
152 svga_context_flush( svga
, NULL
);
154 ret
= retry_draw_arrays(svga
, prim
, start
, count
,
155 start_instance
, instance_count
,
160 SVGA_STATS_TIME_POP(svga_sws(svga
));
166 * Determine if we need to implement primitive restart with a fallback
167 * path which breaks the original primitive into sub-primitive at the
171 need_fallback_prim_restart(const struct svga_context
*svga
,
172 const struct pipe_draw_info
*info
)
174 if (info
->primitive_restart
&& info
->index_size
) {
175 if (!svga_have_vgpu10(svga
))
177 else if (!svga
->state
.sw
.need_swtnl
) {
178 if (info
->index_size
== 1)
179 return TRUE
; /* no device support for 1-byte indexes */
180 else if (info
->index_size
== 2)
181 return info
->restart_index
!= 0xffff;
183 return info
->restart_index
!= 0xffffffff;
192 svga_draw_vbo(struct pipe_context
*pipe
, const struct pipe_draw_info
*info
)
194 struct svga_context
*svga
= svga_context( pipe
);
195 enum pipe_prim_type reduced_prim
= u_reduced_prim( info
->mode
);
196 unsigned count
= info
->count
;
197 enum pipe_error ret
= 0;
198 boolean needed_swtnl
;
199 struct pipe_resource
*indexbuf
=
200 info
->has_user_indices
? NULL
: info
->index
.resource
;
202 SVGA_STATS_TIME_PUSH(svga_sws(svga
), SVGA_STATS_TIME_DRAWVBO
);
204 svga
->hud
.num_draw_calls
++; /* for SVGA_QUERY_NUM_DRAW_CALLS */
206 if (u_reduced_prim(info
->mode
) == PIPE_PRIM_TRIANGLES
&&
207 svga
->curr
.rast
->templ
.cull_face
== PIPE_FACE_FRONT_AND_BACK
)
210 /* Upload a user index buffer. */
211 unsigned index_offset
= 0;
212 if (info
->index_size
&& info
->has_user_indices
&&
213 !util_upload_index_buffer(pipe
, info
, &indexbuf
, &index_offset
)) {
218 * Mark currently bound target surfaces as dirty
219 * doesn't really matter if it is done before drawing.
221 * TODO If we ever normaly return something other then
222 * true we should not mark it as dirty then.
224 svga_mark_surfaces_dirty(svga_context(pipe
));
226 if (svga
->curr
.reduced_prim
!= reduced_prim
) {
227 svga
->curr
.reduced_prim
= reduced_prim
;
228 svga
->dirty
|= SVGA_NEW_REDUCED_PRIMITIVE
;
231 if (need_fallback_prim_restart(svga
, info
)) {
233 r
= util_draw_vbo_without_prim_restart(pipe
, info
);
234 assert(r
== PIPE_OK
);
239 if (!u_trim_pipe_prim( info
->mode
, &count
))
242 needed_swtnl
= svga
->state
.sw
.need_swtnl
;
244 svga_update_state_retry( svga
, SVGA_STATE_NEED_SWTNL
);
246 if (svga
->state
.sw
.need_swtnl
) {
247 svga
->hud
.num_fallbacks
++; /* for SVGA_QUERY_NUM_FALLBACKS */
250 * We're switching from HW to SW TNL. SW TNL will require mapping all
251 * currently bound vertex buffers, some of which may already be
252 * referenced in the current command buffer as result of previous HW
253 * TNL. So flush now, to prevent the context to flush while a referred
254 * vertex buffer is mapped.
257 svga_context_flush(svga
, NULL
);
260 /* Avoid leaking the previous hwtnl bias to swtnl */
261 svga_hwtnl_set_index_bias( svga
->hwtnl
, 0 );
262 ret
= svga_swtnl_draw_vbo(svga
, info
, indexbuf
, index_offset
);
265 if (info
->index_size
&& indexbuf
) {
268 assert(index_offset
% info
->index_size
== 0);
269 offset
= index_offset
/ info
->index_size
;
271 ret
= retry_draw_range_elements( svga
,
278 info
->start
+ offset
,
280 info
->start_instance
,
281 info
->instance_count
,
285 ret
= retry_draw_arrays(svga
, info
->mode
, info
->start
, count
,
286 info
->start_instance
, info
->instance_count
,
291 /* XXX: Silence warnings, do something sensible here? */
294 if (SVGA_DEBUG
& DEBUG_FLUSH
) {
295 svga_hwtnl_flush_retry( svga
);
296 svga_context_flush(svga
, NULL
);
300 if (info
->index_size
&& info
->index
.resource
!= indexbuf
)
301 pipe_resource_reference(&indexbuf
, NULL
);
302 SVGA_STATS_TIME_POP(svga_sws(svga
));
306 void svga_init_draw_functions( struct svga_context
*svga
)
308 svga
->pipe
.draw_vbo
= svga_draw_vbo
;