[mesa.git] src/gallium/drivers/svga/svga_state_vs.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_inlines.h"
#include "pipe/p_defines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_bitmask.h"
#include "translate/translate.h"
#include "tgsi/tgsi_ureg.h"

#include "svga_context.h"
#include "svga_state.h"
#include "svga_cmd.h"
#include "svga_shader.h"
#include "svga_tgsi.h"

#include "svga_hw_reg.h"

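/*
 * Vertex shader state handling:  translate the bound TGSI vertex shader
 * into device shader variants, cache them on the shader object, and bind
 * the variant matching the current state (driven by the svga_hw_vs
 * tracked state at the bottom of this file).
 */
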
/**
 * If we fail to compile a vertex shader we'll use a dummy/fallback shader
 * that simply emits a (0,0,0,1) vertex position.
 */
static const struct tgsi_token *
get_dummy_vertex_shader(void)
{
   static const float zero[4] = { 0.0, 0.0, 0.0, 1.0 };
   struct ureg_program *ureg;
   const struct tgsi_token *tokens;
   struct ureg_src src;
   struct ureg_dst dst;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src = ureg_DECL_immediate(ureg, zero, 4);
   ureg_MOV(ureg, dst, src);
   ureg_END(ureg);

   tokens = ureg_get_tokens(ureg, NULL);

   ureg_destroy(ureg);

   return tokens;
}

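/**
 * Translate the vertex shader's TGSI tokens into a device shader variant,
 * using the VGPU10 translator when the device supports it and the legacy
 * VGPU9 translator otherwise.
 */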
static struct svga_shader_variant *
translate_vertex_program(struct svga_context *svga,
                         const struct svga_vertex_shader *vs,
                         const struct svga_compile_key *key)
{
   if (svga_have_vgpu10(svga)) {
      return svga_tgsi_vgpu10_translate(svga, &vs->base, key,
                                        PIPE_SHADER_VERTEX);
   }
   else {
      return svga_tgsi_vgpu9_translate(svga, &vs->base, key,
                                       PIPE_SHADER_VERTEX);
   }
}

/**
 * Replace the given shader's instructions with a simple/dummy shader.
 * We use this when normal shader translation fails.
 */
static struct svga_shader_variant *
get_compiled_dummy_vertex_shader(struct svga_context *svga,
                                 struct svga_vertex_shader *vs,
                                 const struct svga_compile_key *key)
{
   const struct tgsi_token *dummy = get_dummy_vertex_shader();
   struct svga_shader_variant *variant;

   if (!dummy) {
      return NULL;
   }

   FREE((void *) vs->base.tokens);
   vs->base.tokens = dummy;

   tgsi_scan_shader(vs->base.tokens, &vs->base.info);
   vs->generic_outputs = svga_get_generic_outputs_mask(&vs->base.info);

   variant = translate_vertex_program(svga, vs, key);
   return variant;
}

/**
 * Translate the TGSI shader into an svga shader variant and define it
 * on the device.
 */
static enum pipe_error
compile_vs(struct svga_context *svga,
           struct svga_vertex_shader *vs,
           const struct svga_compile_key *key,
           struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant;
   enum pipe_error ret = PIPE_ERROR;

   variant = translate_vertex_program(svga, vs, key);
   if (variant == NULL) {
      debug_printf("Failed to compile vertex shader,"
                   " using dummy shader instead.\n");
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }
   else if (svga_shader_too_large(svga, variant)) {
      /* too big, use dummy shader */
      debug_printf("Shader too large (%u bytes),"
                   " using dummy shader instead.\n",
                   (unsigned) (variant->nr_tokens
                               * sizeof(variant->tokens[0])));
      /* Free the too-large variant */
      svga_destroy_shader_variant(svga, variant);
      /* Use simple pass-through shader instead */
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }

   if (!variant) {
      return PIPE_ERROR;
   }

   ret = svga_define_shader(svga, variant);
   if (ret != PIPE_OK) {
      svga_destroy_shader_variant(svga, variant);
      return ret;
   }

   *out_variant = variant;

   return PIPE_OK;
}

/**
 * Build the compile key for the current vertex shader.
 *
 * Reads: SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS, SVGA_NEW_VELEMENT,
 * SVGA_NEW_TEXTURE_BINDING, SVGA_NEW_SAMPLER, SVGA_NEW_NEED_SWTNL
 */
static void
make_vs_key(struct svga_context *svga, struct svga_compile_key *key)
{
   struct svga_vertex_shader *vs = svga->curr.vs;

   memset(key, 0, sizeof *key);

   if (svga->state.sw.need_swtnl && svga_have_vgpu10(svga)) {
      /* Set both of these flags, to match compile_passthrough_vs() */
      key->vs.passthrough = 1;
      key->vs.undo_viewport = 1;
      return;
   }

   if (svga_have_vgpu10(svga)) {
      key->vs.need_vertex_id_bias = 1;
   }

   /* SVGA_NEW_PRESCALE */
   key->vs.need_prescale = svga->state.hw_clear.prescale[0].enabled &&
                           (svga->curr.tes == NULL) &&
                           (svga->curr.gs == NULL);

   /* SVGA_NEW_RAST */
   key->vs.allow_psiz = svga->curr.rast->templ.point_size_per_vertex;

   /* SVGA_NEW_FS */
   key->vs.fs_generic_inputs = svga->curr.fs->generic_inputs;

   svga_remap_generics(key->vs.fs_generic_inputs, key->generic_remap_table);

   /* SVGA_NEW_VELEMENT */
   key->vs.adjust_attrib_range = svga->curr.velems->adjust_attrib_range;
   key->vs.adjust_attrib_w_1 = svga->curr.velems->adjust_attrib_w_1;
   key->vs.attrib_is_pure_int = svga->curr.velems->attrib_is_pure_int;
   key->vs.adjust_attrib_itof = svga->curr.velems->adjust_attrib_itof;
   key->vs.adjust_attrib_utof = svga->curr.velems->adjust_attrib_utof;
   key->vs.attrib_is_bgra = svga->curr.velems->attrib_is_bgra;
   key->vs.attrib_puint_to_snorm = svga->curr.velems->attrib_puint_to_snorm;
   key->vs.attrib_puint_to_uscaled = svga->curr.velems->attrib_puint_to_uscaled;
   key->vs.attrib_puint_to_sscaled = svga->curr.velems->attrib_puint_to_sscaled;

   /* SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER */
   svga_init_shader_key_common(svga, PIPE_SHADER_VERTEX, &vs->base, key);

   /* SVGA_NEW_RAST */
   key->clip_plane_enable = svga->curr.rast->templ.clip_plane_enable;

   /* Determine if this shader is the last shader in the vertex
    * processing stage.
    */
   key->last_vertex_stage = !(svga->curr.gs ||
                              svga->curr.tcs || svga->curr.tes);
}

/**
 * svga_reemit_vs_bindings - Reemit the vertex shader bindings
 */
enum pipe_error
svga_reemit_vs_bindings(struct svga_context *svga)
{
   enum pipe_error ret;
   struct svga_winsys_gb_shader *gbshader = NULL;
   SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;

   assert(svga->rebind.flags.vs);
   assert(svga_have_gb_objects(svga));

   if (svga->state.hw_draw.vs) {
      gbshader = svga->state.hw_draw.vs->gb_shader;
      shaderId = svga->state.hw_draw.vs->id;
   }

   if (!svga_need_to_rebind_resources(svga)) {
      ret = svga->swc->resource_rebind(svga->swc, NULL, gbshader,
                                       SVGA_RELOC_READ);
   }
   else {
      if (svga_have_vgpu10(svga))
         ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_VS,
                                       gbshader, shaderId);
      else
         ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_VS, gbshader);
   }

   if (ret != PIPE_OK)
      return ret;

   svga->rebind.flags.vs = FALSE;
   return PIPE_OK;
}

/**
 * Generate a simple vertex shader that passes through the VS outputs
 * which will be consumed by the fragment shader.  Used when the 'draw'
 * module performs vertex processing in software: the real vertex shader
 * has already been executed by 'draw', so the device-side shader only
 * needs to forward its results.
 */
static enum pipe_error
compile_passthrough_vs(struct svga_context *svga,
                       struct svga_vertex_shader *vs,
                       struct svga_fragment_shader *fs,
                       struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant = NULL;
   unsigned num_inputs;
   unsigned i;
   unsigned num_elements;
   struct svga_vertex_shader new_vs;
   struct ureg_src src[PIPE_MAX_SHADER_INPUTS];
   struct ureg_dst dst[PIPE_MAX_SHADER_OUTPUTS];
   struct ureg_program *ureg;
   struct svga_compile_key key;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));
   assert(fs);

   num_inputs = fs->base.info.num_inputs;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* draw will always add position */
   dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src[0] = ureg_DECL_vs_input(ureg, 0);
   num_elements = 1;

   /**
    * The swtnl backend redefines the input layout based on the
    * fragment shader's inputs.  So we only need to pass through
    * those inputs that will be consumed by the fragment shader.
    * Note: DX10 requires the number of vertex elements
    * specified in the input layout to be no less than the
    * number of inputs to the vertex shader.
    */
   for (i = 0; i < num_inputs; i++) {
      switch (fs->base.info.input_semantic_name[i]) {
      case TGSI_SEMANTIC_COLOR:
      case TGSI_SEMANTIC_GENERIC:
      case TGSI_SEMANTIC_FOG:
         dst[num_elements] = ureg_DECL_output(ureg,
                                fs->base.info.input_semantic_name[i],
                                fs->base.info.input_semantic_index[i]);
         src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
         num_elements++;
         break;
      default:
         break;
      }
   }

   for (i = 0; i < num_elements; i++) {
      ureg_MOV(ureg, dst[i], src[i]);
   }

   ureg_END(ureg);

   memset(&new_vs, 0, sizeof(new_vs));
   new_vs.base.tokens = ureg_get_tokens(ureg, NULL);
   tgsi_scan_shader(new_vs.base.tokens, &new_vs.base.info);

   memset(&key, 0, sizeof(key));
   key.vs.undo_viewport = 1;

   ret = compile_vs(svga, &new_vs, &key, &variant);

   /* Free the temporary tokens and ureg program whether or not the
    * compile succeeded, so the error path doesn't leak them.
    */
   ureg_free_tokens(new_vs.base.tokens);
   ureg_destroy(ureg);

   if (ret != PIPE_OK)
      return ret;

   /* Overwrite the variant key to indicate it's a pass-through VS */
   memset(&variant->key, 0, sizeof(variant->key));
   variant->key.vs.passthrough = 1;
   variant->key.vs.undo_viewport = 1;

   *out_variant = variant;

   return PIPE_OK;
}

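/**
 * Bring the hardware vertex shader up to date with the current state:
 * set up vertex shader stream output, find or compile a shader variant
 * matching the current compile key, and bind that variant if it changed.
 */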
static enum pipe_error
emit_hw_vs(struct svga_context *svga, uint64_t dirty)
{
   struct svga_shader_variant *variant;
   struct svga_vertex_shader *vs = svga->curr.vs;
   struct svga_fragment_shader *fs = svga->curr.fs;
   enum pipe_error ret = PIPE_OK;
   struct svga_compile_key key;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);

   /* If there is an active geometry shader, and it has stream output
    * defined, then we will skip the stream output from the vertex shader
    */
   if (!svga_have_gs_streamout(svga)) {
      /* No GS stream out */
      if (svga_have_vs_streamout(svga)) {
         /* Set VS stream out */
         ret = svga_set_stream_output(svga, vs->base.stream_output);
      }
      else {
         /* turn off stream out */
         ret = svga_set_stream_output(svga, NULL);
      }
      if (ret != PIPE_OK) {
         goto done;
      }
   }

   /* SVGA_NEW_NEED_SWTNL */
   if (svga->state.sw.need_swtnl && !svga_have_vgpu10(svga)) {
      /* No vertex shader is needed */
      variant = NULL;
   }
   else {
      make_vs_key(svga, &key);

      /* See if we already have a VS variant that matches the key */
      variant = svga_search_shader_key(&vs->base, &key);

      if (!variant) {
         /* Create VS variant now */
         if (key.vs.passthrough) {
            ret = compile_passthrough_vs(svga, vs, fs, &variant);
         }
         else {
            ret = compile_vs(svga, vs, &key, &variant);
         }
         if (ret != PIPE_OK)
            goto done;

         /* insert the new variant at head of linked list */
         assert(variant);
         variant->next = vs->base.variants;
         vs->base.variants = variant;
      }
   }

   if (variant != svga->state.hw_draw.vs) {
      /* Bind the new variant */
      if (variant) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
         if (ret != PIPE_OK)
            goto done;
         svga->rebind.flags.vs = FALSE;
      }

      svga->dirty |= SVGA_NEW_VS_VARIANT;
      svga->state.hw_draw.vs = variant;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}

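/*
 * Tracked state:  when any of the listed SVGA_NEW_* flags are dirty,
 * emit_hw_vs() is called to refresh the hardware vertex shader.
 */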
struct svga_tracked_state svga_hw_vs =
{
   "vertex shader (hwtnl)",
   (SVGA_NEW_VS |
    SVGA_NEW_FS |
    SVGA_NEW_TEXTURE_BINDING |
    SVGA_NEW_SAMPLER |
    SVGA_NEW_RAST |
    SVGA_NEW_PRESCALE |
    SVGA_NEW_VELEMENT |
    SVGA_NEW_NEED_SWTNL),
   emit_hw_vs
};