1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
27 #include "util/u_format.h"
28 #include "util/u_inlines.h"
29 #include "util/u_prim.h"
30 #include "util/u_time.h"
31 #include "indices/u_indices.h"
33 #include "svga_hw_reg.h"
35 #include "svga_context.h"
36 #include "svga_screen.h"
37 #include "svga_draw.h"
38 #include "svga_state.h"
39 #include "svga_swtnl.h"
40 #include "svga_debug.h"
41 #include "svga_resource_buffer.h"
44 static enum pipe_error
45 retry_draw_range_elements( struct svga_context
*svga
,
46 struct pipe_resource
*index_buffer
,
54 unsigned instance_count
,
57 enum pipe_error ret
= PIPE_OK
;
59 svga_hwtnl_set_unfilled( svga
->hwtnl
,
60 svga
->curr
.rast
->hw_unfilled
);
62 svga_hwtnl_set_flatshade( svga
->hwtnl
,
63 svga
->curr
.rast
->templ
.flatshade
,
64 svga
->curr
.rast
->templ
.flatshade_first
);
66 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
70 ret
= svga_hwtnl_draw_range_elements( svga
->hwtnl
,
71 index_buffer
, index_size
, index_bias
,
80 svga_context_flush( svga
, NULL
);
84 return retry_draw_range_elements( svga
,
85 index_buffer
, index_size
, index_bias
,
88 instance_count
, FALSE
);
95 static enum pipe_error
96 retry_draw_arrays( struct svga_context
*svga
,
100 unsigned instance_count
,
105 svga_hwtnl_set_unfilled( svga
->hwtnl
,
106 svga
->curr
.rast
->hw_unfilled
);
108 svga_hwtnl_set_flatshade( svga
->hwtnl
,
109 svga
->curr
.rast
->templ
.flatshade
,
110 svga
->curr
.rast
->templ
.flatshade_first
);
112 ret
= svga_update_state( svga
, SVGA_STATE_HW_DRAW
);
116 ret
= svga_hwtnl_draw_arrays( svga
->hwtnl
, prim
,
124 if (ret
== PIPE_ERROR_OUT_OF_MEMORY
&& do_retry
)
126 svga_context_flush( svga
, NULL
);
128 return retry_draw_arrays( svga
,
141 svga_draw_vbo(struct pipe_context
*pipe
, const struct pipe_draw_info
*info
)
143 struct svga_context
*svga
= svga_context( pipe
);
144 unsigned reduced_prim
= u_reduced_prim( info
->mode
);
145 unsigned count
= info
->count
;
146 enum pipe_error ret
= 0;
147 boolean needed_swtnl
;
149 svga
->num_draw_calls
++; /* for SVGA_QUERY_DRAW_CALLS */
151 if (!u_trim_pipe_prim( info
->mode
, &count
))
155 * Mark currently bound target surfaces as dirty
156 * doesn't really matter if it is done before drawing.
158 * TODO If we ever normaly return something other then
159 * true we should not mark it as dirty then.
161 svga_mark_surfaces_dirty(svga_context(pipe
));
163 if (svga
->curr
.reduced_prim
!= reduced_prim
) {
164 svga
->curr
.reduced_prim
= reduced_prim
;
165 svga
->dirty
|= SVGA_NEW_REDUCED_PRIMITIVE
;
168 needed_swtnl
= svga
->state
.sw
.need_swtnl
;
170 svga_update_state_retry( svga
, SVGA_STATE_NEED_SWTNL
);
173 if (svga
->curr
.vs
->base
.id
== svga
->debug
.disable_shader
||
174 svga
->curr
.fs
->base
.id
== svga
->debug
.disable_shader
)
178 if (svga
->state
.sw
.need_swtnl
) {
179 svga
->num_fallbacks
++; /* for SVGA_QUERY_FALLBACKS */
182 * We're switching from HW to SW TNL. SW TNL will require mapping all
183 * currently bound vertex buffers, some of which may already be
184 * referenced in the current command buffer as result of previous HW
185 * TNL. So flush now, to prevent the context to flush while a referred
186 * vertex buffer is mapped.
189 svga_context_flush(svga
, NULL
);
192 /* Avoid leaking the previous hwtnl bias to swtnl */
193 svga_hwtnl_set_index_bias( svga
->hwtnl
, 0 );
194 ret
= svga_swtnl_draw_vbo( svga
, info
);
197 if (info
->indexed
&& svga
->curr
.ib
.buffer
) {
200 assert(svga
->curr
.ib
.offset
% svga
->curr
.ib
.index_size
== 0);
201 offset
= svga
->curr
.ib
.offset
/ svga
->curr
.ib
.index_size
;
203 ret
= retry_draw_range_elements( svga
,
204 svga
->curr
.ib
.buffer
,
205 svga
->curr
.ib
.index_size
,
210 info
->start
+ offset
,
212 info
->instance_count
,
216 ret
= retry_draw_arrays( svga
,
220 info
->instance_count
,
225 /* XXX: Silence warnings, do something sensible here? */
228 if (SVGA_DEBUG
& DEBUG_FLUSH
) {
229 svga_hwtnl_flush_retry( svga
);
230 svga_context_flush(svga
, NULL
);
235 void svga_init_draw_functions( struct svga_context
*svga
)
237 svga
->pipe
.draw_vbo
= svga_draw_vbo
;