gallium/tgsi: Add a helper for initializing ureg from a shader_info.
[mesa.git] / src / gallium / auxiliary / tgsi / tgsi_ureg.c
1 /**************************************************************************
2 *
3 * Copyright 2009-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "pipe/p_screen.h"
30 #include "pipe/p_context.h"
31 #include "pipe/p_state.h"
32 #include "tgsi/tgsi_ureg.h"
33 #include "tgsi/tgsi_build.h"
34 #include "tgsi/tgsi_info.h"
35 #include "tgsi/tgsi_dump.h"
36 #include "tgsi/tgsi_sanity.h"
37 #include "util/u_debug.h"
38 #include "util/u_inlines.h"
39 #include "util/u_memory.h"
40 #include "util/u_math.h"
41 #include "util/u_bitmask.h"
42 #include "GL/gl.h"
43 #include "compiler/shader_info.h"
44
/* Overlay of every 32-bit TGSI token layout used by ureg.
 *
 * All TGSI tokens are one dword wide, so the emit code reserves generic
 * slots (see get_tokens()) and writes whichever bitfield struct applies
 * through this union.
 */
union tgsi_any_token {
   struct tgsi_header header;
   struct tgsi_processor processor;
   struct tgsi_token token;
   struct tgsi_property prop;
   struct tgsi_property_data prop_data;
   struct tgsi_declaration decl;
   struct tgsi_declaration_range decl_range;
   struct tgsi_declaration_dimension decl_dim;
   struct tgsi_declaration_interp decl_interp;
   struct tgsi_declaration_image decl_image;
   struct tgsi_declaration_semantic decl_semantic;
   struct tgsi_declaration_sampler_view decl_sampler_view;
   struct tgsi_declaration_array array;
   struct tgsi_immediate imm;
   union  tgsi_immediate_data imm_data;
   struct tgsi_instruction insn;
   struct tgsi_instruction_label insn_label;
   struct tgsi_instruction_texture insn_texture;
   struct tgsi_instruction_memory insn_memory;
   struct tgsi_texture_offset insn_texture_offset;
   struct tgsi_src_register src;
   struct tgsi_ind_register ind;
   struct tgsi_dimension dim;
   struct tgsi_dst_register dst;
   unsigned value; /* raw dword view; used to zero-initialize a token */
};
72
73
/* A growable pool of tokens for one emission domain (decls or insns). */
struct ureg_tokens {
   union tgsi_any_token *tokens; /* heap buffer, or error_tokens after OOM */
   unsigned size;                /* allocated capacity (grows as 1 << order) */
   unsigned order;               /* log2 of the capacity after first growth */
   unsigned count;               /* tokens written so far */
};
80
81 #define UREG_MAX_INPUT (4 * PIPE_MAX_SHADER_INPUTS)
82 #define UREG_MAX_SYSTEM_VALUE PIPE_MAX_ATTRIBS
83 #define UREG_MAX_OUTPUT (4 * PIPE_MAX_SHADER_OUTPUTS)
84 #define UREG_MAX_CONSTANT_RANGE 32
85 #define UREG_MAX_HW_ATOMIC_RANGE 32
86 #define UREG_MAX_IMMEDIATE 4096
87 #define UREG_MAX_ADDR 3
88 #define UREG_MAX_ARRAY_TEMPS 256
89
/* Inclusive [first, last] ranges of constants referenced from one
 * constant buffer; maintained by ureg_DECL_constant()/_constant2D().
 */
struct const_decl {
   struct {
      unsigned first;
      unsigned last;
   } constant_range[UREG_MAX_CONSTANT_RANGE];
   unsigned nr_constant_ranges;
};
97
/* Inclusive [first, last] ranges of hw atomic counters referenced from one
 * atomic buffer; maintained by ureg_DECL_hw_atomic().
 */
struct hw_atomic_decl {
   struct {
      unsigned first;
      unsigned last;
      unsigned array_id;
   } hw_atomic_range[UREG_MAX_HW_ATOMIC_RANGE];
   unsigned nr_hw_atomic_ranges;
};
106
107 #define DOMAIN_DECL 0
108 #define DOMAIN_INSN 1
109
/* An in-progress shader program built up through the ureg_* API.
 *
 * Declarations, immediates and properties are accumulated in the side
 * tables below; instruction tokens go straight into domain[DOMAIN_INSN].
 * The final TGSI token stream is assembled from all of this later.
 */
struct ureg_program
{
   enum pipe_shader_type processor;
   bool supports_any_inout_decl_range;
   int next_shader_processor;

   /* Non-VS inputs, deduplicated by (semantic_name, semantic_index). */
   struct {
      enum tgsi_semantic semantic_name;
      unsigned semantic_index;
      enum tgsi_interpolate_mode interp;
      unsigned char cylindrical_wrap;
      unsigned char usage_mask;     /* TGSI_WRITEMASK_* of referenced comps */
      enum tgsi_interpolate_loc interp_location;
      unsigned first;               /* first register of the (array) decl */
      unsigned last;                /* = first + array_size - 1 */
      unsigned array_id;
   } input[UREG_MAX_INPUT];
   unsigned nr_inputs, nr_input_regs;

   /* Bitmask of VS input slots declared via ureg_DECL_vs_input(). */
   unsigned vs_inputs[PIPE_MAX_ATTRIBS/32];

   /* System values, deduplicated by (semantic_name, semantic_index). */
   struct {
      enum tgsi_semantic semantic_name;
      unsigned semantic_index;
   } system_value[UREG_MAX_SYSTEM_VALUE];
   unsigned nr_system_values;

   /* Outputs, deduplicated by (semantic_name, semantic_index). */
   struct {
      enum tgsi_semantic semantic_name;
      unsigned semantic_index;
      unsigned streams;
      unsigned usage_mask; /* = TGSI_WRITEMASK_* */
      unsigned first;
      unsigned last;
      unsigned array_id;
      boolean invariant;
   } output[UREG_MAX_OUTPUT];
   unsigned nr_outputs, nr_output_regs;

   /* Immediate constants, shared and packed by decl_immediate(). */
   struct {
      union {
         float f[4];
         unsigned u[4];
         int i[4];
      } value;
      unsigned nr;   /* dwords in use, <= 4 */
      unsigned type; /* TGSI_IMM_* */
   } immediate[UREG_MAX_IMMEDIATE];
   unsigned nr_immediates;

   struct ureg_src sampler[PIPE_MAX_SAMPLERS];
   unsigned nr_samplers;

   struct {
      unsigned index;
      enum tgsi_texture_type target;
      enum tgsi_return_type return_type_x;
      enum tgsi_return_type return_type_y;
      enum tgsi_return_type return_type_z;
      enum tgsi_return_type return_type_w;
   } sampler_view[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_sampler_views;

   struct {
      unsigned index;
      enum tgsi_texture_type target;
      enum pipe_format format;
      boolean wr;  /* write flag as passed to ureg_DECL_image() */
      boolean raw; /* raw flag as passed to ureg_DECL_image() */
   } image[PIPE_MAX_SHADER_IMAGES];
   unsigned nr_images;

   struct {
      unsigned index;
      bool atomic;
   } buffer[PIPE_MAX_SHADER_BUFFERS];
   unsigned nr_buffers;

   /* Temporary-register bookkeeping (see alloc_temporary()). */
   struct util_bitmask *free_temps;  /* released, reusable temps */
   struct util_bitmask *local_temps; /* temps allocated with local=TRUE */
   struct util_bitmask *decl_temps;  /* indices that open a new decl range */
   unsigned nr_temps;

   /* First register of each declared temp array; ArrayID is index + 1. */
   unsigned array_temps[UREG_MAX_ARRAY_TEMPS];
   unsigned nr_array_temps;

   struct const_decl const_decls[PIPE_MAX_CONSTANT_BUFFERS];

   struct hw_atomic_decl hw_atomic_decls[PIPE_MAX_HW_ATOMIC_BUFFERS];

   unsigned properties[TGSI_PROPERTY_COUNT];

   unsigned nr_addrs;
   unsigned nr_instructions;

   /* Token pools: domain[DOMAIN_DECL] and domain[DOMAIN_INSN]. */
   struct ureg_tokens domain[2];

   /* Which TGSI_MEMORY_TYPE_* areas were touched via ureg_DECL_memory(). */
   bool use_memory[TGSI_MEMORY_TYPE_COUNT];
};
209
210 static union tgsi_any_token error_tokens[32];
211
212 static void tokens_error( struct ureg_tokens *tokens )
213 {
214 if (tokens->tokens && tokens->tokens != error_tokens)
215 FREE(tokens->tokens);
216
217 tokens->tokens = error_tokens;
218 tokens->size = ARRAY_SIZE(error_tokens);
219 tokens->count = 0;
220 }
221
222
223 static void tokens_expand( struct ureg_tokens *tokens,
224 unsigned count )
225 {
226 unsigned old_size = tokens->size * sizeof(unsigned);
227
228 if (tokens->tokens == error_tokens) {
229 return;
230 }
231
232 while (tokens->count + count > tokens->size) {
233 tokens->size = (1 << ++tokens->order);
234 }
235
236 tokens->tokens = REALLOC(tokens->tokens,
237 old_size,
238 tokens->size * sizeof(unsigned));
239 if (tokens->tokens == NULL) {
240 tokens_error(tokens);
241 }
242 }
243
244 static void set_bad( struct ureg_program *ureg )
245 {
246 tokens_error(&ureg->domain[0]);
247 }
248
249
250
251 static union tgsi_any_token *get_tokens( struct ureg_program *ureg,
252 unsigned domain,
253 unsigned count )
254 {
255 struct ureg_tokens *tokens = &ureg->domain[domain];
256 union tgsi_any_token *result;
257
258 if (tokens->count + count > tokens->size)
259 tokens_expand(tokens, count);
260
261 result = &tokens->tokens[tokens->count];
262 tokens->count += count;
263 return result;
264 }
265
266
267 static union tgsi_any_token *retrieve_token( struct ureg_program *ureg,
268 unsigned domain,
269 unsigned nr )
270 {
271 if (ureg->domain[domain].tokens == error_tokens)
272 return &error_tokens[0];
273
274 return &ureg->domain[domain].tokens[nr];
275 }
276
277
278 void
279 ureg_property(struct ureg_program *ureg, unsigned name, unsigned value)
280 {
281 assert(name < ARRAY_SIZE(ureg->properties));
282 ureg->properties[name] = value;
283 }
284
285 struct ureg_src
286 ureg_DECL_fs_input_cyl_centroid_layout(struct ureg_program *ureg,
287 enum tgsi_semantic semantic_name,
288 unsigned semantic_index,
289 enum tgsi_interpolate_mode interp_mode,
290 unsigned cylindrical_wrap,
291 enum tgsi_interpolate_loc interp_location,
292 unsigned index,
293 unsigned usage_mask,
294 unsigned array_id,
295 unsigned array_size)
296 {
297 unsigned i;
298
299 assert(usage_mask != 0);
300 assert(usage_mask <= TGSI_WRITEMASK_XYZW);
301
302 for (i = 0; i < ureg->nr_inputs; i++) {
303 if (ureg->input[i].semantic_name == semantic_name &&
304 ureg->input[i].semantic_index == semantic_index) {
305 assert(ureg->input[i].interp == interp_mode);
306 assert(ureg->input[i].cylindrical_wrap == cylindrical_wrap);
307 assert(ureg->input[i].interp_location == interp_location);
308 if (ureg->input[i].array_id == array_id) {
309 ureg->input[i].usage_mask |= usage_mask;
310 goto out;
311 }
312 assert((ureg->input[i].usage_mask & usage_mask) == 0);
313 }
314 }
315
316 if (ureg->nr_inputs < UREG_MAX_INPUT) {
317 assert(array_size >= 1);
318 ureg->input[i].semantic_name = semantic_name;
319 ureg->input[i].semantic_index = semantic_index;
320 ureg->input[i].interp = interp_mode;
321 ureg->input[i].cylindrical_wrap = cylindrical_wrap;
322 ureg->input[i].interp_location = interp_location;
323 ureg->input[i].first = index;
324 ureg->input[i].last = index + array_size - 1;
325 ureg->input[i].array_id = array_id;
326 ureg->input[i].usage_mask = usage_mask;
327 ureg->nr_input_regs = MAX2(ureg->nr_input_regs, index + array_size);
328 ureg->nr_inputs++;
329 } else {
330 set_bad(ureg);
331 }
332
333 out:
334 return ureg_src_array_register(TGSI_FILE_INPUT, ureg->input[i].first,
335 array_id);
336 }
337
338 struct ureg_src
339 ureg_DECL_fs_input_cyl_centroid(struct ureg_program *ureg,
340 enum tgsi_semantic semantic_name,
341 unsigned semantic_index,
342 enum tgsi_interpolate_mode interp_mode,
343 unsigned cylindrical_wrap,
344 enum tgsi_interpolate_loc interp_location,
345 unsigned array_id,
346 unsigned array_size)
347 {
348 return ureg_DECL_fs_input_cyl_centroid_layout(ureg,
349 semantic_name, semantic_index, interp_mode,
350 cylindrical_wrap, interp_location,
351 ureg->nr_input_regs, TGSI_WRITEMASK_XYZW, array_id, array_size);
352 }
353
354
355 struct ureg_src
356 ureg_DECL_vs_input( struct ureg_program *ureg,
357 unsigned index )
358 {
359 assert(ureg->processor == PIPE_SHADER_VERTEX);
360 assert(index / 32 < ARRAY_SIZE(ureg->vs_inputs));
361
362 ureg->vs_inputs[index/32] |= 1 << (index % 32);
363 return ureg_src_register( TGSI_FILE_INPUT, index );
364 }
365
366
367 struct ureg_src
368 ureg_DECL_input_layout(struct ureg_program *ureg,
369 enum tgsi_semantic semantic_name,
370 unsigned semantic_index,
371 unsigned index,
372 unsigned usage_mask,
373 unsigned array_id,
374 unsigned array_size)
375 {
376 return ureg_DECL_fs_input_cyl_centroid_layout(ureg,
377 semantic_name, semantic_index,
378 TGSI_INTERPOLATE_CONSTANT, 0, TGSI_INTERPOLATE_LOC_CENTER,
379 index, usage_mask, array_id, array_size);
380 }
381
382
383 struct ureg_src
384 ureg_DECL_input(struct ureg_program *ureg,
385 enum tgsi_semantic semantic_name,
386 unsigned semantic_index,
387 unsigned array_id,
388 unsigned array_size)
389 {
390 return ureg_DECL_fs_input_cyl_centroid(ureg, semantic_name, semantic_index,
391 TGSI_INTERPOLATE_CONSTANT, 0,
392 TGSI_INTERPOLATE_LOC_CENTER,
393 array_id, array_size);
394 }
395
396
397 struct ureg_src
398 ureg_DECL_system_value(struct ureg_program *ureg,
399 enum tgsi_semantic semantic_name,
400 unsigned semantic_index)
401 {
402 unsigned i;
403
404 for (i = 0; i < ureg->nr_system_values; i++) {
405 if (ureg->system_value[i].semantic_name == semantic_name &&
406 ureg->system_value[i].semantic_index == semantic_index) {
407 goto out;
408 }
409 }
410
411 if (ureg->nr_system_values < UREG_MAX_SYSTEM_VALUE) {
412 ureg->system_value[ureg->nr_system_values].semantic_name = semantic_name;
413 ureg->system_value[ureg->nr_system_values].semantic_index = semantic_index;
414 i = ureg->nr_system_values;
415 ureg->nr_system_values++;
416 } else {
417 set_bad(ureg);
418 }
419
420 out:
421 return ureg_src_register(TGSI_FILE_SYSTEM_VALUE, i);
422 }
423
424
/* Declare (or extend) a shader output with full layout control.
 *
 * Outputs are deduplicated by (semantic_name, semantic_index):
 * re-declaring an existing output with the same array_id just ORs in the
 * extra usage_mask and streams bits.  `index` is the first output
 * register; `array_size` registers starting there are covered.  On table
 * overflow the program is marked bad and entry 0 is returned so the
 * caller still gets a valid (if meaningless) register.
 */
struct ureg_dst
ureg_DECL_output_layout(struct ureg_program *ureg,
                        enum tgsi_semantic semantic_name,
                        unsigned semantic_index,
                        unsigned streams,
                        unsigned index,
                        unsigned usage_mask,
                        unsigned array_id,
                        unsigned array_size,
                        boolean invariant)
{
   unsigned i;

   assert(usage_mask != 0);
   /* Stream bits 2i..2i+1 select the stream for component i, so any
    * stream selection requires the matching component to be written.
    */
   assert(!(streams & 0x03) || (usage_mask & 1));
   assert(!(streams & 0x0c) || (usage_mask & 2));
   assert(!(streams & 0x30) || (usage_mask & 4));
   assert(!(streams & 0xc0) || (usage_mask & 8));

   /* Reuse an existing declaration of the same semantic, if any. */
   for (i = 0; i < ureg->nr_outputs; i++) {
      if (ureg->output[i].semantic_name == semantic_name &&
          ureg->output[i].semantic_index == semantic_index) {
         if (ureg->output[i].array_id == array_id) {
            ureg->output[i].usage_mask |= usage_mask;
            goto out;
         }
         assert((ureg->output[i].usage_mask & usage_mask) == 0);
      }
   }

   if (ureg->nr_outputs < UREG_MAX_OUTPUT) {
      /* `i` is the first free slot here. */
      ureg->output[i].semantic_name = semantic_name;
      ureg->output[i].semantic_index = semantic_index;
      ureg->output[i].usage_mask = usage_mask;
      ureg->output[i].first = index;
      ureg->output[i].last = index + array_size - 1;
      ureg->output[i].array_id = array_id;
      ureg->output[i].invariant = invariant;
      ureg->nr_output_regs = MAX2(ureg->nr_output_regs, index + array_size);
      ureg->nr_outputs++;
   }
   else {
      set_bad( ureg );
      i = 0;
   }

out:
   ureg->output[i].streams |= streams;

   return ureg_dst_array_register(TGSI_FILE_OUTPUT, ureg->output[i].first,
                                  array_id);
}
477
478
479 struct ureg_dst
480 ureg_DECL_output_masked(struct ureg_program *ureg,
481 unsigned name,
482 unsigned index,
483 unsigned usage_mask,
484 unsigned array_id,
485 unsigned array_size)
486 {
487 return ureg_DECL_output_layout(ureg, name, index, 0,
488 ureg->nr_output_regs, usage_mask, array_id,
489 array_size, FALSE);
490 }
491
492
493 struct ureg_dst
494 ureg_DECL_output(struct ureg_program *ureg,
495 enum tgsi_semantic name,
496 unsigned index)
497 {
498 return ureg_DECL_output_masked(ureg, name, index, TGSI_WRITEMASK_XYZW,
499 0, 1);
500 }
501
502 struct ureg_dst
503 ureg_DECL_output_array(struct ureg_program *ureg,
504 enum tgsi_semantic semantic_name,
505 unsigned semantic_index,
506 unsigned array_id,
507 unsigned array_size)
508 {
509 return ureg_DECL_output_masked(ureg, semantic_name, semantic_index,
510 TGSI_WRITEMASK_XYZW,
511 array_id, array_size);
512 }
513
514
515 /* Returns a new constant register. Keep track of which have been
516 * referred to so that we can emit decls later.
517 *
518 * Constant operands declared with this function must be addressed
519 * with a two-dimensional index.
520 *
521 * There is nothing in this code to bind this constant to any tracked
522 * value or manage any constant_buffer contents -- that's the
523 * resposibility of the calling code.
524 */
525 void
526 ureg_DECL_constant2D(struct ureg_program *ureg,
527 unsigned first,
528 unsigned last,
529 unsigned index2D)
530 {
531 struct const_decl *decl = &ureg->const_decls[index2D];
532
533 assert(index2D < PIPE_MAX_CONSTANT_BUFFERS);
534
535 if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
536 uint i = decl->nr_constant_ranges++;
537
538 decl->constant_range[i].first = first;
539 decl->constant_range[i].last = last;
540 }
541 }
542
543
/* A one-dimensional, deprecated version of ureg_DECL_constant2D().
 *
 * Constant operands declared with this function must be addressed
 * with a one-dimensional index.
 *
 * Maintains the constant-buffer-0 range list so that `index` is covered
 * by some range: reuses a covering range, extends an adjacent one,
 * creates a new one, or — when the table is full — collapses everything
 * into a single range spanning all referenced constants.
 */
struct ureg_src
ureg_DECL_constant(struct ureg_program *ureg,
                   unsigned index)
{
   struct const_decl *decl = &ureg->const_decls[0];
   unsigned minconst = index, maxconst = index;
   unsigned i;

   /* Inside existing range?
    */
   for (i = 0; i < decl->nr_constant_ranges; i++) {
      if (decl->constant_range[i].first <= index &&
          decl->constant_range[i].last >= index) {
         goto out;
      }
   }

   /* Extend existing range?
    */
   for (i = 0; i < decl->nr_constant_ranges; i++) {
      if (decl->constant_range[i].last == index - 1) {
         decl->constant_range[i].last = index;
         goto out;
      }

      if (decl->constant_range[i].first == index + 1) {
         decl->constant_range[i].first = index;
         goto out;
      }

      /* Track overall extremes in case we must collapse below. */
      minconst = MIN2(minconst, decl->constant_range[i].first);
      maxconst = MAX2(maxconst, decl->constant_range[i].last);
   }

   /* Create new range?
    */
   if (decl->nr_constant_ranges < UREG_MAX_CONSTANT_RANGE) {
      i = decl->nr_constant_ranges++;
      decl->constant_range[i].first = index;
      decl->constant_range[i].last = index;
      goto out;
   }

   /* Collapse all ranges down to one:
    */
   i = 0;
   decl->constant_range[0].first = minconst;
   decl->constant_range[0].last = maxconst;
   decl->nr_constant_ranges = 1;

out:
   assert(i < decl->nr_constant_ranges);
   assert(decl->constant_range[i].first <= index);
   assert(decl->constant_range[i].last >= index);

   /* Accesses still carry a (constant) dimension index of 0. */
   struct ureg_src src = ureg_src_register(TGSI_FILE_CONSTANT, index);
   return ureg_src_dimension(src, 0);
}
607
608
609 /* Returns a new hw atomic register. Keep track of which have been
610 * referred to so that we can emit decls later.
611 */
612 void
613 ureg_DECL_hw_atomic(struct ureg_program *ureg,
614 unsigned first,
615 unsigned last,
616 unsigned buffer_id,
617 unsigned array_id)
618 {
619 struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[buffer_id];
620
621 if (decl->nr_hw_atomic_ranges < UREG_MAX_HW_ATOMIC_RANGE) {
622 uint i = decl->nr_hw_atomic_ranges++;
623
624 decl->hw_atomic_range[i].first = first;
625 decl->hw_atomic_range[i].last = last;
626 decl->hw_atomic_range[i].array_id = array_id;
627 } else {
628 set_bad(ureg);
629 }
630 }
631
632 static struct ureg_dst alloc_temporary( struct ureg_program *ureg,
633 boolean local )
634 {
635 unsigned i;
636
637 /* Look for a released temporary.
638 */
639 for (i = util_bitmask_get_first_index(ureg->free_temps);
640 i != UTIL_BITMASK_INVALID_INDEX;
641 i = util_bitmask_get_next_index(ureg->free_temps, i + 1)) {
642 if (util_bitmask_get(ureg->local_temps, i) == local)
643 break;
644 }
645
646 /* Or allocate a new one.
647 */
648 if (i == UTIL_BITMASK_INVALID_INDEX) {
649 i = ureg->nr_temps++;
650
651 if (local)
652 util_bitmask_set(ureg->local_temps, i);
653
654 /* Start a new declaration when the local flag changes */
655 if (!i || util_bitmask_get(ureg->local_temps, i - 1) != local)
656 util_bitmask_set(ureg->decl_temps, i);
657 }
658
659 util_bitmask_clear(ureg->free_temps, i);
660
661 return ureg_dst_register( TGSI_FILE_TEMPORARY, i );
662 }
663
664 struct ureg_dst ureg_DECL_temporary( struct ureg_program *ureg )
665 {
666 return alloc_temporary(ureg, FALSE);
667 }
668
669 struct ureg_dst ureg_DECL_local_temporary( struct ureg_program *ureg )
670 {
671 return alloc_temporary(ureg, TRUE);
672 }
673
674 struct ureg_dst ureg_DECL_array_temporary( struct ureg_program *ureg,
675 unsigned size,
676 boolean local )
677 {
678 unsigned i = ureg->nr_temps;
679 struct ureg_dst dst = ureg_dst_register( TGSI_FILE_TEMPORARY, i );
680
681 if (local)
682 util_bitmask_set(ureg->local_temps, i);
683
684 /* Always start a new declaration at the start */
685 util_bitmask_set(ureg->decl_temps, i);
686
687 ureg->nr_temps += size;
688
689 /* and also at the end of the array */
690 util_bitmask_set(ureg->decl_temps, ureg->nr_temps);
691
692 if (ureg->nr_array_temps < UREG_MAX_ARRAY_TEMPS) {
693 ureg->array_temps[ureg->nr_array_temps++] = i;
694 dst.ArrayID = ureg->nr_array_temps;
695 }
696
697 return dst;
698 }
699
700 void ureg_release_temporary( struct ureg_program *ureg,
701 struct ureg_dst tmp )
702 {
703 if(tmp.File == TGSI_FILE_TEMPORARY)
704 util_bitmask_set(ureg->free_temps, tmp.Index);
705 }
706
707
708 /* Allocate a new address register.
709 */
710 struct ureg_dst ureg_DECL_address( struct ureg_program *ureg )
711 {
712 if (ureg->nr_addrs < UREG_MAX_ADDR)
713 return ureg_dst_register( TGSI_FILE_ADDRESS, ureg->nr_addrs++ );
714
715 assert( 0 );
716 return ureg_dst_register( TGSI_FILE_ADDRESS, 0 );
717 }
718
719 /* Allocate a new sampler.
720 */
721 struct ureg_src ureg_DECL_sampler( struct ureg_program *ureg,
722 unsigned nr )
723 {
724 unsigned i;
725
726 for (i = 0; i < ureg->nr_samplers; i++)
727 if (ureg->sampler[i].Index == (int)nr)
728 return ureg->sampler[i];
729
730 if (i < PIPE_MAX_SAMPLERS) {
731 ureg->sampler[i] = ureg_src_register( TGSI_FILE_SAMPLER, nr );
732 ureg->nr_samplers++;
733 return ureg->sampler[i];
734 }
735
736 assert( 0 );
737 return ureg->sampler[0];
738 }
739
740 /*
741 * Allocate a new shader sampler view.
742 */
743 struct ureg_src
744 ureg_DECL_sampler_view(struct ureg_program *ureg,
745 unsigned index,
746 enum tgsi_texture_type target,
747 enum tgsi_return_type return_type_x,
748 enum tgsi_return_type return_type_y,
749 enum tgsi_return_type return_type_z,
750 enum tgsi_return_type return_type_w)
751 {
752 struct ureg_src reg = ureg_src_register(TGSI_FILE_SAMPLER_VIEW, index);
753 uint i;
754
755 for (i = 0; i < ureg->nr_sampler_views; i++) {
756 if (ureg->sampler_view[i].index == index) {
757 return reg;
758 }
759 }
760
761 if (i < PIPE_MAX_SHADER_SAMPLER_VIEWS) {
762 ureg->sampler_view[i].index = index;
763 ureg->sampler_view[i].target = target;
764 ureg->sampler_view[i].return_type_x = return_type_x;
765 ureg->sampler_view[i].return_type_y = return_type_y;
766 ureg->sampler_view[i].return_type_z = return_type_z;
767 ureg->sampler_view[i].return_type_w = return_type_w;
768 ureg->nr_sampler_views++;
769 return reg;
770 }
771
772 assert(0);
773 return reg;
774 }
775
776 /* Allocate a new image.
777 */
778 struct ureg_src
779 ureg_DECL_image(struct ureg_program *ureg,
780 unsigned index,
781 enum tgsi_texture_type target,
782 enum pipe_format format,
783 boolean wr,
784 boolean raw)
785 {
786 struct ureg_src reg = ureg_src_register(TGSI_FILE_IMAGE, index);
787 unsigned i;
788
789 for (i = 0; i < ureg->nr_images; i++)
790 if (ureg->image[i].index == index)
791 return reg;
792
793 if (i < PIPE_MAX_SHADER_IMAGES) {
794 ureg->image[i].index = index;
795 ureg->image[i].target = target;
796 ureg->image[i].wr = wr;
797 ureg->image[i].raw = raw;
798 ureg->image[i].format = format;
799 ureg->nr_images++;
800 return reg;
801 }
802
803 assert(0);
804 return reg;
805 }
806
807 /* Allocate a new buffer.
808 */
809 struct ureg_src ureg_DECL_buffer(struct ureg_program *ureg, unsigned nr,
810 bool atomic)
811 {
812 struct ureg_src reg = ureg_src_register(TGSI_FILE_BUFFER, nr);
813 unsigned i;
814
815 for (i = 0; i < ureg->nr_buffers; i++)
816 if (ureg->buffer[i].index == nr)
817 return reg;
818
819 if (i < PIPE_MAX_SHADER_BUFFERS) {
820 ureg->buffer[i].index = nr;
821 ureg->buffer[i].atomic = atomic;
822 ureg->nr_buffers++;
823 return reg;
824 }
825
826 assert(0);
827 return reg;
828 }
829
830 /* Allocate a memory area.
831 */
832 struct ureg_src ureg_DECL_memory(struct ureg_program *ureg,
833 unsigned memory_type)
834 {
835 struct ureg_src reg = ureg_src_register(TGSI_FILE_MEMORY, memory_type);
836
837 ureg->use_memory[memory_type] = true;
838 return reg;
839 }
840
/* Try to locate the 64-bit (dword-pair) values v[0..nr) inside the
 * existing immediate contents v2[0..*pnr2), appending missing pairs.
 * On success *swizzle encodes, per source dword, which immediate dword
 * to read, and *pnr2 is updated.  Returns false (0) without committing
 * *pnr2 when the values cannot all fit within four dwords.
 */
static int
match_or_expand_immediate64( const unsigned *v,
                             unsigned nr,
                             unsigned *v2,
                             unsigned *pnr2,
                             unsigned *swizzle )
{
   unsigned used = *pnr2;
   unsigned src;

   *swizzle = 0;

   for (src = 0; src < nr; src += 2) {
      unsigned dst;
      int match = 0;

      /* Look for an identical pair already stored in the immediate. */
      for (dst = 0; dst < used; dst += 2) {
         if (v2[dst] == v[src] && v2[dst + 1] == v[src + 1]) {
            *swizzle |= (dst << (src * 2)) | ((dst + 1) << ((src + 1) * 2));
            match = 1;
            break;
         }
      }

      if (!match) {
         if (used >= 4) {
            return 0;
         }

         v2[used] = v[src];
         v2[used + 1] = v[src + 1];

         *swizzle |= (used << (src * 2)) | ((used + 1) << ((src + 1) * 2));
         used += 2;
      }
   }

   /* Commit the new element count only on full success. */
   *pnr2 = used;
   return 1;
}
879
880 static int
881 match_or_expand_immediate( const unsigned *v,
882 int type,
883 unsigned nr,
884 unsigned *v2,
885 unsigned *pnr2,
886 unsigned *swizzle )
887 {
888 unsigned nr2 = *pnr2;
889 unsigned i, j;
890
891 if (type == TGSI_IMM_FLOAT64 ||
892 type == TGSI_IMM_UINT64 ||
893 type == TGSI_IMM_INT64)
894 return match_or_expand_immediate64(v, nr, v2, pnr2, swizzle);
895
896 *swizzle = 0;
897
898 for (i = 0; i < nr; i++) {
899 boolean found = FALSE;
900
901 for (j = 0; j < nr2 && !found; j++) {
902 if (v[i] == v2[j]) {
903 *swizzle |= j << (i * 2);
904 found = TRUE;
905 }
906 }
907
908 if (!found) {
909 if (nr2 >= 4) {
910 return FALSE;
911 }
912
913 v2[nr2] = v[i];
914 *swizzle |= nr2 << (i * 2);
915 nr2++;
916 }
917 }
918
919 /* Actually expand immediate only when fully succeeded.
920 */
921 *pnr2 = nr2;
922 return TRUE;
923 }
924
925
/* Find or create an immediate holding the `nr` dwords in `v` (of the
 * given TGSI_IMM_* type), sharing components between immediates where
 * possible.
 *
 * Returns a swizzled source so that components 0..nr-1 read the requested
 * values; trailing components replicate the declared ones, which makes
 * size-one immediates behave as scalars.
 */
static struct ureg_src
decl_immediate( struct ureg_program *ureg,
                const unsigned *v,
                unsigned nr,
                unsigned type )
{
   unsigned i, j;
   unsigned swizzle = 0;

   /* Could do a first pass where we examine all existing immediates
    * without expanding.
    */

   /* Try to pack the values into an already-declared immediate of the
    * same type, possibly appending to it.
    */
   for (i = 0; i < ureg->nr_immediates; i++) {
      if (ureg->immediate[i].type != type) {
         continue;
      }
      if (match_or_expand_immediate(v,
                                    type,
                                    nr,
                                    ureg->immediate[i].value.u,
                                    &ureg->immediate[i].nr,
                                    &swizzle)) {
         goto out;
      }
   }

   /* Otherwise start a fresh immediate. */
   if (ureg->nr_immediates < UREG_MAX_IMMEDIATE) {
      i = ureg->nr_immediates++;
      ureg->immediate[i].type = type;
      if (match_or_expand_immediate(v,
                                    type,
                                    nr,
                                    ureg->immediate[i].value.u,
                                    &ureg->immediate[i].nr,
                                    &swizzle)) {
         goto out;
      }
   }

   /* Immediate table exhausted. */
   set_bad(ureg);

out:
   /* Make sure that all referenced elements are from this immediate.
    * Has the effect of making size-one immediates into scalars.
    */
   if (type == TGSI_IMM_FLOAT64 ||
       type == TGSI_IMM_UINT64 ||
       type == TGSI_IMM_INT64) {
      /* 64-bit values: replicate the first dword pair (swizzle & 0xf). */
      for (j = nr; j < 4; j+=2) {
         swizzle |= (swizzle & 0xf) << (j * 2);
      }
   } else {
      /* Replicate the first component's swizzle (swizzle & 0x3). */
      for (j = nr; j < 4; j++) {
         swizzle |= (swizzle & 0x3) << (j * 2);
      }
   }
   return ureg_swizzle(ureg_src_register(TGSI_FILE_IMMEDIATE, i),
                       (swizzle >> 0) & 0x3,
                       (swizzle >> 2) & 0x3,
                       (swizzle >> 4) & 0x3,
                       (swizzle >> 6) & 0x3);
}
989
990
991 struct ureg_src
992 ureg_DECL_immediate( struct ureg_program *ureg,
993 const float *v,
994 unsigned nr )
995 {
996 union {
997 float f[4];
998 unsigned u[4];
999 } fu;
1000 unsigned int i;
1001
1002 for (i = 0; i < nr; i++) {
1003 fu.f[i] = v[i];
1004 }
1005
1006 return decl_immediate(ureg, fu.u, nr, TGSI_IMM_FLOAT32);
1007 }
1008
1009 struct ureg_src
1010 ureg_DECL_immediate_f64( struct ureg_program *ureg,
1011 const double *v,
1012 unsigned nr )
1013 {
1014 union {
1015 unsigned u[4];
1016 double d[2];
1017 } fu;
1018 unsigned int i;
1019
1020 assert((nr / 2) < 3);
1021 for (i = 0; i < nr / 2; i++) {
1022 fu.d[i] = v[i];
1023 }
1024
1025 return decl_immediate(ureg, fu.u, nr, TGSI_IMM_FLOAT64);
1026 }
1027
1028 struct ureg_src
1029 ureg_DECL_immediate_uint( struct ureg_program *ureg,
1030 const unsigned *v,
1031 unsigned nr )
1032 {
1033 return decl_immediate(ureg, v, nr, TGSI_IMM_UINT32);
1034 }
1035
1036
1037 struct ureg_src
1038 ureg_DECL_immediate_block_uint( struct ureg_program *ureg,
1039 const unsigned *v,
1040 unsigned nr )
1041 {
1042 uint index;
1043 uint i;
1044
1045 if (ureg->nr_immediates + (nr + 3) / 4 > UREG_MAX_IMMEDIATE) {
1046 set_bad(ureg);
1047 return ureg_src_register(TGSI_FILE_IMMEDIATE, 0);
1048 }
1049
1050 index = ureg->nr_immediates;
1051 ureg->nr_immediates += (nr + 3) / 4;
1052
1053 for (i = index; i < ureg->nr_immediates; i++) {
1054 ureg->immediate[i].type = TGSI_IMM_UINT32;
1055 ureg->immediate[i].nr = nr > 4 ? 4 : nr;
1056 memcpy(ureg->immediate[i].value.u,
1057 &v[(i - index) * 4],
1058 ureg->immediate[i].nr * sizeof(uint));
1059 nr -= 4;
1060 }
1061
1062 return ureg_src_register(TGSI_FILE_IMMEDIATE, index);
1063 }
1064
1065
1066 struct ureg_src
1067 ureg_DECL_immediate_int( struct ureg_program *ureg,
1068 const int *v,
1069 unsigned nr )
1070 {
1071 return decl_immediate(ureg, (const unsigned *)v, nr, TGSI_IMM_INT32);
1072 }
1073
1074 struct ureg_src
1075 ureg_DECL_immediate_uint64( struct ureg_program *ureg,
1076 const uint64_t *v,
1077 unsigned nr )
1078 {
1079 union {
1080 unsigned u[4];
1081 uint64_t u64[2];
1082 } fu;
1083 unsigned int i;
1084
1085 assert((nr / 2) < 3);
1086 for (i = 0; i < nr / 2; i++) {
1087 fu.u64[i] = v[i];
1088 }
1089
1090 return decl_immediate(ureg, fu.u, nr, TGSI_IMM_UINT64);
1091 }
1092
1093 struct ureg_src
1094 ureg_DECL_immediate_int64( struct ureg_program *ureg,
1095 const int64_t *v,
1096 unsigned nr )
1097 {
1098 union {
1099 unsigned u[4];
1100 int64_t i64[2];
1101 } fu;
1102 unsigned int i;
1103
1104 assert((nr / 2) < 3);
1105 for (i = 0; i < nr / 2; i++) {
1106 fu.i64[i] = v[i];
1107 }
1108
1109 return decl_immediate(ureg, fu.u, nr, TGSI_IMM_INT64);
1110 }
1111
/* Append the token encoding of `src` to the instruction domain.
 *
 * Emits 1 base token, plus 1 token for an indirect index, plus 1 or 2
 * tokens for a second (2D) index depending on whether it is itself
 * indirect.
 */
void
ureg_emit_src( struct ureg_program *ureg,
               struct ureg_src src )
{
   unsigned size = 1 + (src.Indirect ? 1 : 0) +
                   (src.Dimension ? (src.DimIndirect ? 2 : 1) : 0);

   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
   unsigned n = 0;

   assert(src.File != TGSI_FILE_NULL);
   assert(src.File < TGSI_FILE_COUNT);

   /* Base source-register token. */
   out[n].value = 0;
   out[n].src.File = src.File;
   out[n].src.SwizzleX = src.SwizzleX;
   out[n].src.SwizzleY = src.SwizzleY;
   out[n].src.SwizzleZ = src.SwizzleZ;
   out[n].src.SwizzleW = src.SwizzleW;
   out[n].src.Index = src.Index;
   out[n].src.Negate = src.Negate;
   out[0].src.Absolute = src.Absolute;
   n++;

   if (src.Indirect) {
      out[0].src.Indirect = 1;
      out[n].value = 0;
      out[n].ind.File = src.IndirectFile;
      out[n].ind.Swizzle = src.IndirectSwizzle;
      out[n].ind.Index = src.IndirectIndex;
      /* Drivers without arbitrary in/out decl-range support address
       * inputs/outputs directly, so the ArrayID must be cleared there.
       */
      if (!ureg->supports_any_inout_decl_range &&
          (src.File == TGSI_FILE_INPUT || src.File == TGSI_FILE_OUTPUT))
         out[n].ind.ArrayID = 0;
      else
         out[n].ind.ArrayID = src.ArrayID;
      n++;
   }

   if (src.Dimension) {
      out[0].src.Dimension = 1;
      out[n].dim.Dimension = 0;
      out[n].dim.Padding = 0;
      if (src.DimIndirect) {
         /* Indirect 2D index: a dimension token plus its own indirect
          * register token.
          */
         out[n].dim.Indirect = 1;
         out[n].dim.Index = src.DimensionIndex;
         n++;
         out[n].value = 0;
         out[n].ind.File = src.DimIndFile;
         out[n].ind.Swizzle = src.DimIndSwizzle;
         out[n].ind.Index = src.DimIndIndex;
         if (!ureg->supports_any_inout_decl_range &&
             (src.File == TGSI_FILE_INPUT || src.File == TGSI_FILE_OUTPUT))
            out[n].ind.ArrayID = 0;
         else
            out[n].ind.ArrayID = src.ArrayID;
      } else {
         out[n].dim.Indirect = 0;
         out[n].dim.Index = src.DimensionIndex;
      }
      n++;
   }

   assert(n == size);
}
1176
1177
/* Append the token encoding of `dst` to the instruction domain.
 *
 * Emits 1 base token, plus 1 token for an indirect index, plus 1 or 2
 * tokens for a second (2D) index depending on whether it is itself
 * indirect.
 */
void
ureg_emit_dst( struct ureg_program *ureg,
               struct ureg_dst dst )
{
   unsigned size = 1 + (dst.Indirect ? 1 : 0) +
                   (dst.Dimension ? (dst.DimIndirect ? 2 : 1) : 0);

   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
   unsigned n = 0;

   assert(dst.File != TGSI_FILE_NULL);
   /* Read-only files can never be destinations. */
   assert(dst.File != TGSI_FILE_SAMPLER);
   assert(dst.File != TGSI_FILE_SAMPLER_VIEW);
   assert(dst.File != TGSI_FILE_IMMEDIATE);
   assert(dst.File < TGSI_FILE_COUNT);

   /* Base destination-register token. */
   out[n].value = 0;
   out[n].dst.File = dst.File;
   out[n].dst.WriteMask = dst.WriteMask;
   out[n].dst.Indirect = dst.Indirect;
   out[n].dst.Index = dst.Index;
   n++;

   if (dst.Indirect) {
      out[n].value = 0;
      out[n].ind.File = dst.IndirectFile;
      out[n].ind.Swizzle = dst.IndirectSwizzle;
      out[n].ind.Index = dst.IndirectIndex;
      /* Drivers without arbitrary in/out decl-range support address
       * inputs/outputs directly, so the ArrayID must be cleared there.
       */
      if (!ureg->supports_any_inout_decl_range &&
          (dst.File == TGSI_FILE_INPUT || dst.File == TGSI_FILE_OUTPUT))
         out[n].ind.ArrayID = 0;
      else
         out[n].ind.ArrayID = dst.ArrayID;
      n++;
   }

   if (dst.Dimension) {
      out[0].dst.Dimension = 1;
      out[n].dim.Dimension = 0;
      out[n].dim.Padding = 0;
      if (dst.DimIndirect) {
         /* Indirect 2D index: a dimension token plus its own indirect
          * register token.
          */
         out[n].dim.Indirect = 1;
         out[n].dim.Index = dst.DimensionIndex;
         n++;
         out[n].value = 0;
         out[n].ind.File = dst.DimIndFile;
         out[n].ind.Swizzle = dst.DimIndSwizzle;
         out[n].ind.Index = dst.DimIndIndex;
         if (!ureg->supports_any_inout_decl_range &&
             (dst.File == TGSI_FILE_INPUT || dst.File == TGSI_FILE_OUTPUT))
            out[n].ind.ArrayID = 0;
         else
            out[n].ind.ArrayID = dst.ArrayID;
      } else {
         out[n].dim.Indirect = 0;
         out[n].dim.Index = dst.DimensionIndex;
      }
      n++;
   }

   assert(n == size);
}
1240
1241
1242 static void validate( enum tgsi_opcode opcode,
1243 unsigned nr_dst,
1244 unsigned nr_src )
1245 {
1246 #ifndef NDEBUG
1247 const struct tgsi_opcode_info *info = tgsi_get_opcode_info( opcode );
1248 assert(info);
1249 if (info) {
1250 assert(nr_dst == info->num_dst);
1251 assert(nr_src == info->num_src);
1252 }
1253 #endif
1254 }
1255
/**
 * Emit the leading instruction token for an instruction.
 *
 * Operand tokens must be appended by the caller (ureg_emit_dst /
 * ureg_emit_src), and ureg_fixup_insn_size() must be called afterwards
 * to patch NrTokens.
 *
 * \return token indices of the instruction token and of the token that
 *         extension words (texture/memory/label) should be attached to.
 */
struct ureg_emit_insn_result
ureg_emit_insn(struct ureg_program *ureg,
               enum tgsi_opcode opcode,
               boolean saturate,
               unsigned precise,
               unsigned num_dst,
               unsigned num_src)
{
   union tgsi_any_token *out;
   uint count = 1;
   struct ureg_emit_insn_result result;

   /* Debug-only check against the opcode table. */
   validate( opcode, num_dst, num_src );

   out = get_tokens( ureg, DOMAIN_INSN, count );
   out[0].insn = tgsi_default_instruction();
   out[0].insn.Opcode = opcode;
   out[0].insn.Saturate = saturate;
   out[0].insn.Precise = precise;
   out[0].insn.NumDstRegs = num_dst;
   out[0].insn.NumSrcRegs = num_src;

   /* Index of the token just emitted (count was added by get_tokens). */
   result.insn_token = ureg->domain[DOMAIN_INSN].count - count;
   result.extended_token = result.insn_token;

   ureg->nr_instructions++;

   return result;
}
1285
1286
/**
 * Emit a label token.
 * \param label_token returns a token number indicating where the label
 * needs to be patched later.  Later, this value should be passed to the
 * ureg_fixup_label() function.
 */
void
ureg_emit_label(struct ureg_program *ureg,
                unsigned extended_token,
                unsigned *label_token )
{
   union tgsi_any_token *out, *insn;

   /* Caller doesn't want the label patched later: emit nothing. */
   if (!label_token)
      return;

   /* Placeholder label token; its value is filled in by
    * ureg_fixup_label().
    */
   out = get_tokens( ureg, DOMAIN_INSN, 1 );
   out[0].value = 0;

   /* Mark the owning instruction as carrying a label. */
   insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );
   insn->insn.Label = 1;

   *label_token = ureg->domain[DOMAIN_INSN].count - 1;
}
1311
/* Will return a number which can be used in a label to point to the
 * next instruction to be emitted.
 */
unsigned
ureg_get_instruction_number( struct ureg_program *ureg )
{
   /* nr_instructions is bumped by ureg_emit_insn() for each instruction. */
   return ureg->nr_instructions;
}
1320
/* Patch a given label (expressed as a token number) to point to a
 * given instruction (expressed as an instruction number).
 */
void
ureg_fixup_label(struct ureg_program *ureg,
                 unsigned label_token,
                 unsigned instruction_number )
{
   /* label_token was handed out earlier by ureg_emit_label(). */
   union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, label_token );

   out->insn_label.Label = instruction_number;
}
1333
1334
/**
 * Append a texture extension token to the instruction identified by
 * \p extended_token, recording the sampling target, return type and the
 * number of texel-offset tokens that will follow.
 */
void
ureg_emit_texture(struct ureg_program *ureg,
                  unsigned extended_token,
                  enum tgsi_texture_type target,
                  enum tgsi_return_type return_type, unsigned num_offsets)
{
   union tgsi_any_token *out, *insn;

   out = get_tokens( ureg, DOMAIN_INSN, 1 );
   insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );

   /* Flag the instruction as carrying a texture token. */
   insn->insn.Texture = 1;

   out[0].value = 0;
   out[0].insn_texture.Texture = target;
   out[0].insn_texture.NumOffsets = num_offsets;
   out[0].insn_texture.ReturnType = return_type;
}
1353
1354 void
1355 ureg_emit_texture_offset(struct ureg_program *ureg,
1356 const struct tgsi_texture_offset *offset)
1357 {
1358 union tgsi_any_token *out;
1359
1360 out = get_tokens( ureg, DOMAIN_INSN, 1);
1361
1362 out[0].value = 0;
1363 out[0].insn_texture_offset = *offset;
1364 }
1365
/**
 * Append a memory extension token to the instruction identified by
 * \p extended_token, recording the access qualifier, resource target
 * and data format.
 */
void
ureg_emit_memory(struct ureg_program *ureg,
                 unsigned extended_token,
                 unsigned qualifier,
                 enum tgsi_texture_type texture,
                 enum pipe_format format)
{
   union tgsi_any_token *out, *insn;

   out = get_tokens( ureg, DOMAIN_INSN, 1 );
   insn = retrieve_token( ureg, DOMAIN_INSN, extended_token );

   /* Flag the instruction as carrying a memory token. */
   insn->insn.Memory = 1;

   out[0].value = 0;
   out[0].insn_memory.Qualifier = qualifier;
   out[0].insn_memory.Texture = texture;
   out[0].insn_memory.Format = format;
}
1385
/**
 * Patch NrTokens of the instruction token at index \p insn to cover all
 * tokens emitted since it (operands, texture/memory/label extensions).
 */
void
ureg_fixup_insn_size(struct ureg_program *ureg,
                     unsigned insn )
{
   union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, insn );

   assert(out->insn.Type == TGSI_TOKEN_TYPE_INSTRUCTION);
   /* "- 1": NrTokens counts the instruction token itself plus what
    * follows it, not the tokens preceding it.
    */
   out->insn.NrTokens = ureg->domain[DOMAIN_INSN].count - insn - 1;
}
1395
1396
1397 void
1398 ureg_insn(struct ureg_program *ureg,
1399 enum tgsi_opcode opcode,
1400 const struct ureg_dst *dst,
1401 unsigned nr_dst,
1402 const struct ureg_src *src,
1403 unsigned nr_src,
1404 unsigned precise )
1405 {
1406 struct ureg_emit_insn_result insn;
1407 unsigned i;
1408 boolean saturate;
1409
1410 if (nr_dst && ureg_dst_is_empty(dst[0])) {
1411 return;
1412 }
1413
1414 saturate = nr_dst ? dst[0].Saturate : FALSE;
1415
1416 insn = ureg_emit_insn(ureg,
1417 opcode,
1418 saturate,
1419 precise,
1420 nr_dst,
1421 nr_src);
1422
1423 for (i = 0; i < nr_dst; i++)
1424 ureg_emit_dst( ureg, dst[i] );
1425
1426 for (i = 0; i < nr_src; i++)
1427 ureg_emit_src( ureg, src[i] );
1428
1429 ureg_fixup_insn_size( ureg, insn.insn_token );
1430 }
1431
1432 void
1433 ureg_tex_insn(struct ureg_program *ureg,
1434 enum tgsi_opcode opcode,
1435 const struct ureg_dst *dst,
1436 unsigned nr_dst,
1437 enum tgsi_texture_type target,
1438 enum tgsi_return_type return_type,
1439 const struct tgsi_texture_offset *texoffsets,
1440 unsigned nr_offset,
1441 const struct ureg_src *src,
1442 unsigned nr_src )
1443 {
1444 struct ureg_emit_insn_result insn;
1445 unsigned i;
1446 boolean saturate;
1447
1448 if (nr_dst && ureg_dst_is_empty(dst[0])) {
1449 return;
1450 }
1451
1452 saturate = nr_dst ? dst[0].Saturate : FALSE;
1453
1454 insn = ureg_emit_insn(ureg,
1455 opcode,
1456 saturate,
1457 0,
1458 nr_dst,
1459 nr_src);
1460
1461 ureg_emit_texture( ureg, insn.extended_token, target, return_type,
1462 nr_offset );
1463
1464 for (i = 0; i < nr_offset; i++)
1465 ureg_emit_texture_offset( ureg, &texoffsets[i]);
1466
1467 for (i = 0; i < nr_dst; i++)
1468 ureg_emit_dst( ureg, dst[i] );
1469
1470 for (i = 0; i < nr_src; i++)
1471 ureg_emit_src( ureg, src[i] );
1472
1473 ureg_fixup_insn_size( ureg, insn.insn_token );
1474 }
1475
1476
1477 void
1478 ureg_memory_insn(struct ureg_program *ureg,
1479 enum tgsi_opcode opcode,
1480 const struct ureg_dst *dst,
1481 unsigned nr_dst,
1482 const struct ureg_src *src,
1483 unsigned nr_src,
1484 unsigned qualifier,
1485 enum tgsi_texture_type texture,
1486 enum pipe_format format)
1487 {
1488 struct ureg_emit_insn_result insn;
1489 unsigned i;
1490
1491 insn = ureg_emit_insn(ureg,
1492 opcode,
1493 FALSE,
1494 0,
1495 nr_dst,
1496 nr_src);
1497
1498 ureg_emit_memory(ureg, insn.extended_token, qualifier, texture, format);
1499
1500 for (i = 0; i < nr_dst; i++)
1501 ureg_emit_dst(ureg, dst[i]);
1502
1503 for (i = 0; i < nr_src; i++)
1504 ureg_emit_src(ureg, src[i]);
1505
1506 ureg_fixup_insn_size(ureg, insn.insn_token);
1507 }
1508
1509
/**
 * Emit a register-range declaration carrying a semantic name/index
 * (used for non-FS inputs, outputs and system values).
 *
 * Token layout: decl header, range, semantic, and an optional trailing
 * array token when \p array_id is non-zero.
 */
static void
emit_decl_semantic(struct ureg_program *ureg,
                   unsigned file,
                   unsigned first,
                   unsigned last,
                   enum tgsi_semantic semantic_name,
                   unsigned semantic_index,
                   unsigned streams,
                   unsigned usage_mask,
                   unsigned array_id,
                   boolean invariant)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   /* NOTE(review): NrTokens stays 3 even when the array token makes the
    * declaration 4 tokens long -- matches the other emitters here;
    * presumably the parser accounts for the Array flag separately.
    */
   out[0].decl.NrTokens = 3;
   out[0].decl.File = file;
   out[0].decl.UsageMask = usage_mask;
   out[0].decl.Semantic = 1;
   out[0].decl.Array = array_id != 0;
   out[0].decl.Invariant = invariant;

   out[1].value = 0;
   out[1].decl_range.First = first;
   out[1].decl_range.Last = last;

   out[2].value = 0;
   out[2].decl_semantic.Name = semantic_name;
   out[2].decl_semantic.Index = semantic_index;
   /* Per-component geometry-shader stream assignment, 2 bits each. */
   out[2].decl_semantic.StreamX = streams & 3;
   out[2].decl_semantic.StreamY = (streams >> 2) & 3;
   out[2].decl_semantic.StreamZ = (streams >> 4) & 3;
   out[2].decl_semantic.StreamW = (streams >> 6) & 3;

   if (array_id) {
      out[3].value = 0;
      out[3].array.ArrayID = array_id;
   }
}
1550
/**
 * Emit a two-dimensional HW atomic counter declaration; \p index2D
 * selects the atomic buffer binding.
 */
static void
emit_decl_atomic_2d(struct ureg_program *ureg,
                    unsigned first,
                    unsigned last,
                    unsigned index2D,
                    unsigned array_id)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 3;
   out[0].decl.File = TGSI_FILE_HW_ATOMIC;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.Dimension = 1;
   out[0].decl.Array = array_id != 0;

   out[1].value = 0;
   out[1].decl_range.First = first;
   out[1].decl_range.Last = last;

   /* Second dimension = buffer binding index. */
   out[2].value = 0;
   out[2].decl_dim.Index2D = index2D;

   if (array_id) {
      out[3].value = 0;
      out[3].array.ArrayID = array_id;
   }
}
1580
/**
 * Emit a fragment-shader input declaration: decl header, range,
 * interpolation info, semantic, and an optional trailing array token.
 */
static void
emit_decl_fs(struct ureg_program *ureg,
             unsigned file,
             unsigned first,
             unsigned last,
             enum tgsi_semantic semantic_name,
             unsigned semantic_index,
             enum tgsi_interpolate_mode interpolate,
             unsigned cylindrical_wrap,
             enum tgsi_interpolate_loc interpolate_location,
             unsigned array_id,
             unsigned usage_mask)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL,
                                          array_id ? 5 : 4);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 4;
   out[0].decl.File = file;
   out[0].decl.UsageMask = usage_mask;
   out[0].decl.Interpolate = 1;
   out[0].decl.Semantic = 1;
   out[0].decl.Array = array_id != 0;

   out[1].value = 0;
   out[1].decl_range.First = first;
   out[1].decl_range.Last = last;

   out[2].value = 0;
   out[2].decl_interp.Interpolate = interpolate;
   out[2].decl_interp.CylindricalWrap = cylindrical_wrap;
   out[2].decl_interp.Location = interpolate_location;

   out[3].value = 0;
   out[3].decl_semantic.Name = semantic_name;
   out[3].decl_semantic.Index = semantic_index;

   if (array_id) {
      out[4].value = 0;
      out[4].array.ArrayID = array_id;
   }
}
1624
/**
 * Emit a temporary-register range declaration, optionally marked local
 * and/or belonging to an indexable array (\p arrayid != 0).
 */
static void
emit_decl_temps( struct ureg_program *ureg,
                 unsigned first, unsigned last,
                 boolean local,
                 unsigned arrayid )
{
   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL,
                                           arrayid ? 3 : 2 );

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 2;
   out[0].decl.File = TGSI_FILE_TEMPORARY;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.Local = local;

   out[1].value = 0;
   out[1].decl_range.First = first;
   out[1].decl_range.Last = last;

   if (arrayid) {
      out[0].decl.Array = 1;
      out[2].value = 0;
      out[2].array.ArrayID = arrayid;
   }
}
1651
/**
 * Emit a plain register-range declaration with no semantic info:
 * \p count registers starting at \p first.
 */
static void emit_decl_range( struct ureg_program *ureg,
                             unsigned file,
                             unsigned first,
                             unsigned count )
{
   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 2;
   out[0].decl.File = file;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.Semantic = 0;

   out[1].value = 0;
   out[1].decl_range.First = first;
   /* Range token stores an inclusive last index, not a count. */
   out[1].decl_range.Last = first + count - 1;
}
1670
/**
 * Emit a two-dimensional register-range declaration (e.g. constants in
 * a particular constant buffer, selected by \p index2D).
 */
static void
emit_decl_range2D(struct ureg_program *ureg,
                  unsigned file,
                  unsigned first,
                  unsigned last,
                  unsigned index2D)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 3;
   out[0].decl.File = file;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.Dimension = 1;

   out[1].value = 0;
   out[1].decl_range.First = first;
   out[1].decl_range.Last = last;

   out[2].value = 0;
   out[2].decl_dim.Index2D = index2D;
}
1694
/**
 * Emit a sampler-view declaration for slot \p index, recording the
 * texture target and the per-component return types.
 */
static void
emit_decl_sampler_view(struct ureg_program *ureg,
                       unsigned index,
                       enum tgsi_texture_type target,
                       enum tgsi_return_type return_type_x,
                       enum tgsi_return_type return_type_y,
                       enum tgsi_return_type return_type_z,
                       enum tgsi_return_type return_type_w )
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 3;
   out[0].decl.File = TGSI_FILE_SAMPLER_VIEW;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;

   /* Single-slot range. */
   out[1].value = 0;
   out[1].decl_range.First = index;
   out[1].decl_range.Last = index;

   out[2].value = 0;
   out[2].decl_sampler_view.Resource = target;
   out[2].decl_sampler_view.ReturnTypeX = return_type_x;
   out[2].decl_sampler_view.ReturnTypeY = return_type_y;
   out[2].decl_sampler_view.ReturnTypeZ = return_type_z;
   out[2].decl_sampler_view.ReturnTypeW = return_type_w;
}
1723
/**
 * Emit an image declaration for slot \p index.
 *
 * \param wr   image may be written
 * \param raw  raw (typeless) access
 */
static void
emit_decl_image(struct ureg_program *ureg,
                unsigned index,
                enum tgsi_texture_type target,
                enum pipe_format format,
                boolean wr,
                boolean raw)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 3);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 3;
   out[0].decl.File = TGSI_FILE_IMAGE;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;

   /* Single-slot range. */
   out[1].value = 0;
   out[1].decl_range.First = index;
   out[1].decl_range.Last = index;

   out[2].value = 0;
   out[2].decl_image.Resource = target;
   out[2].decl_image.Writable = wr;
   out[2].decl_image.Raw = raw;
   out[2].decl_image.Format = format;
}
1750
/**
 * Emit a shader buffer (SSBO) declaration for slot \p index; \p atomic
 * marks buffers accessed with atomic operations.
 */
static void
emit_decl_buffer(struct ureg_program *ureg,
                 unsigned index,
                 bool atomic)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 2;
   out[0].decl.File = TGSI_FILE_BUFFER;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.Atomic = atomic;

   /* Single-slot range. */
   out[1].value = 0;
   out[1].decl_range.First = index;
   out[1].decl_range.Last = index;
}
1769
/**
 * Emit a declaration for one memory resource of the given type
 * (TGSI_MEMORY_TYPE_*).
 */
static void
emit_decl_memory(struct ureg_program *ureg, unsigned memory_type)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);

   out[0].value = 0;
   out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
   out[0].decl.NrTokens = 2;
   out[0].decl.File = TGSI_FILE_MEMORY;
   out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW;
   out[0].decl.MemType = memory_type;

   /* The range index doubles as the memory type here -- one MEMORY
    * register per type, addressed by its type value.
    * NOTE(review): looks intentional; confirm against the TGSI parser.
    */
   out[1].value = 0;
   out[1].decl_range.First = memory_type;
   out[1].decl_range.Last = memory_type;
}
1786
/**
 * Emit an immediate: header token followed by four 32-bit data words.
 *
 * \param v     four 32-bit values (raw bit patterns)
 * \param type  TGSI_IMM_* data type of the values
 */
static void
emit_immediate( struct ureg_program *ureg,
                const unsigned *v,
                unsigned type )
{
   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 5 );

   out[0].value = 0;
   out[0].imm.Type = TGSI_TOKEN_TYPE_IMMEDIATE;
   out[0].imm.NrTokens = 5;
   out[0].imm.DataType = type;
   out[0].imm.Padding = 0;

   /* Always four components, regardless of how many are used. */
   out[1].imm_data.Uint = v[0];
   out[2].imm_data.Uint = v[1];
   out[3].imm_data.Uint = v[2];
   out[4].imm_data.Uint = v[3];
}
1805
/**
 * Emit a property token pair: TGSI_PROPERTY_* \p name with its
 * 32-bit \p data value.
 */
static void
emit_property(struct ureg_program *ureg,
              unsigned name,
              unsigned data)
{
   union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, 2);

   out[0].value = 0;
   out[0].prop.Type = TGSI_TOKEN_TYPE_PROPERTY;
   out[0].prop.NrTokens = 2;
   out[0].prop.PropertyName = name;

   out[1].prop_data.Data = data;
}
1820
1821
/**
 * Emit all accumulated declarations (properties, inputs, outputs,
 * system values, samplers, images, buffers, memory, constants, HW
 * atomics, temporaries, address registers and immediates) into the
 * declaration domain, in the order the TGSI stream expects.
 *
 * When the driver lacks ANY_INOUT_DECL_RANGE support, each input/output
 * range is expanded into per-register declarations with ascending
 * semantic indices and no array info.
 */
static void emit_decls( struct ureg_program *ureg )
{
   unsigned i,j;

   /* Properties first; ~0 marks "not set". */
   for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
      if (ureg->properties[i] != ~0u)
         emit_property(ureg, i, ureg->properties[i]);

   if (ureg->processor == PIPE_SHADER_VERTEX) {
      /* VS inputs are tracked as a bitmask of used attributes. */
      for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
         if (ureg->vs_inputs[i/32] & (1u << (i%32))) {
            emit_decl_range( ureg, TGSI_FILE_INPUT, i, 1 );
         }
      }
   } else if (ureg->processor == PIPE_SHADER_FRAGMENT) {
      /* FS inputs carry interpolation info. */
      if (ureg->supports_any_inout_decl_range) {
         for (i = 0; i < ureg->nr_inputs; i++) {
            emit_decl_fs(ureg,
                         TGSI_FILE_INPUT,
                         ureg->input[i].first,
                         ureg->input[i].last,
                         ureg->input[i].semantic_name,
                         ureg->input[i].semantic_index,
                         ureg->input[i].interp,
                         ureg->input[i].cylindrical_wrap,
                         ureg->input[i].interp_location,
                         ureg->input[i].array_id,
                         ureg->input[i].usage_mask);
         }
      }
      else {
         /* Expand each range into per-register declarations. */
         for (i = 0; i < ureg->nr_inputs; i++) {
            for (j = ureg->input[i].first; j <= ureg->input[i].last; j++) {
               emit_decl_fs(ureg,
                            TGSI_FILE_INPUT,
                            j, j,
                            ureg->input[i].semantic_name,
                            ureg->input[i].semantic_index +
                            (j - ureg->input[i].first),
                            ureg->input[i].interp,
                            ureg->input[i].cylindrical_wrap,
                            ureg->input[i].interp_location, 0,
                            ureg->input[i].usage_mask);
            }
         }
      }
   } else {
      /* Other stages: inputs declared by semantic. */
      if (ureg->supports_any_inout_decl_range) {
         for (i = 0; i < ureg->nr_inputs; i++) {
            emit_decl_semantic(ureg,
                               TGSI_FILE_INPUT,
                               ureg->input[i].first,
                               ureg->input[i].last,
                               ureg->input[i].semantic_name,
                               ureg->input[i].semantic_index,
                               0,
                               TGSI_WRITEMASK_XYZW,
                               ureg->input[i].array_id,
                               FALSE);
         }
      }
      else {
         for (i = 0; i < ureg->nr_inputs; i++) {
            for (j = ureg->input[i].first; j <= ureg->input[i].last; j++) {
               emit_decl_semantic(ureg,
                                  TGSI_FILE_INPUT,
                                  j, j,
                                  ureg->input[i].semantic_name,
                                  ureg->input[i].semantic_index +
                                  (j - ureg->input[i].first),
                                  0,
                                  TGSI_WRITEMASK_XYZW, 0, FALSE);
            }
         }
      }
   }

   for (i = 0; i < ureg->nr_system_values; i++) {
      emit_decl_semantic(ureg,
                         TGSI_FILE_SYSTEM_VALUE,
                         i,
                         i,
                         ureg->system_value[i].semantic_name,
                         ureg->system_value[i].semantic_index,
                         0,
                         TGSI_WRITEMASK_XYZW, 0, FALSE);
   }

   /* Outputs, same range-vs-expanded logic as inputs. */
   if (ureg->supports_any_inout_decl_range) {
      for (i = 0; i < ureg->nr_outputs; i++) {
         emit_decl_semantic(ureg,
                            TGSI_FILE_OUTPUT,
                            ureg->output[i].first,
                            ureg->output[i].last,
                            ureg->output[i].semantic_name,
                            ureg->output[i].semantic_index,
                            ureg->output[i].streams,
                            ureg->output[i].usage_mask,
                            ureg->output[i].array_id,
                            ureg->output[i].invariant);
      }
   }
   else {
      for (i = 0; i < ureg->nr_outputs; i++) {
         for (j = ureg->output[i].first; j <= ureg->output[i].last; j++) {
            emit_decl_semantic(ureg,
                               TGSI_FILE_OUTPUT,
                               j, j,
                               ureg->output[i].semantic_name,
                               ureg->output[i].semantic_index +
                               (j - ureg->output[i].first),
                               ureg->output[i].streams,
                               ureg->output[i].usage_mask,
                               0,
                               ureg->output[i].invariant);
         }
      }
   }

   for (i = 0; i < ureg->nr_samplers; i++) {
      emit_decl_range( ureg,
                       TGSI_FILE_SAMPLER,
                       ureg->sampler[i].Index, 1 );
   }

   for (i = 0; i < ureg->nr_sampler_views; i++) {
      emit_decl_sampler_view(ureg,
                             ureg->sampler_view[i].index,
                             ureg->sampler_view[i].target,
                             ureg->sampler_view[i].return_type_x,
                             ureg->sampler_view[i].return_type_y,
                             ureg->sampler_view[i].return_type_z,
                             ureg->sampler_view[i].return_type_w);
   }

   for (i = 0; i < ureg->nr_images; i++) {
      emit_decl_image(ureg,
                      ureg->image[i].index,
                      ureg->image[i].target,
                      ureg->image[i].format,
                      ureg->image[i].wr,
                      ureg->image[i].raw);
   }

   for (i = 0; i < ureg->nr_buffers; i++) {
      emit_decl_buffer(ureg, ureg->buffer[i].index, ureg->buffer[i].atomic);
   }

   for (i = 0; i < TGSI_MEMORY_TYPE_COUNT; i++) {
      if (ureg->use_memory[i])
         emit_decl_memory(ureg, i);
   }

   /* Constants: one 2D range declaration per used range, per buffer. */
   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      struct const_decl *decl = &ureg->const_decls[i];

      if (decl->nr_constant_ranges) {
         uint j;

         for (j = 0; j < decl->nr_constant_ranges; j++) {
            emit_decl_range2D(ureg,
                              TGSI_FILE_CONSTANT,
                              decl->constant_range[j].first,
                              decl->constant_range[j].last,
                              i);
         }
      }
   }

   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
      struct hw_atomic_decl *decl = &ureg->hw_atomic_decls[i];

      if (decl->nr_hw_atomic_ranges) {
         uint j;

         for (j = 0; j < decl->nr_hw_atomic_ranges; j++) {
            emit_decl_atomic_2d(ureg,
                                decl->hw_atomic_range[j].first,
                                decl->hw_atomic_range[j].last,
                                i,
                                decl->hw_atomic_range[j].array_id);
         }
      }
   }

   /* Temporaries: walk runs delimited by the decl_temps bitmask; a run
    * starting at an array start index becomes an array declaration.
    */
   if (ureg->nr_temps) {
      unsigned array = 0;
      for (i = 0; i < ureg->nr_temps;) {
         boolean local = util_bitmask_get(ureg->local_temps, i);
         unsigned first = i;
         i = util_bitmask_get_next_index(ureg->decl_temps, i + 1);
         if (i == UTIL_BITMASK_INVALID_INDEX)
            i = ureg->nr_temps;

         if (array < ureg->nr_array_temps && ureg->array_temps[array] == first)
            emit_decl_temps( ureg, first, i - 1, local, ++array );
         else
            emit_decl_temps( ureg, first, i - 1, local, 0 );
      }
   }

   if (ureg->nr_addrs) {
      emit_decl_range( ureg,
                       TGSI_FILE_ADDRESS,
                       0, ureg->nr_addrs );
   }

   for (i = 0; i < ureg->nr_immediates; i++) {
      emit_immediate( ureg,
                      ureg->immediate[i].value.u,
                      ureg->immediate[i].type );
   }
}
2035
/* Append the instruction tokens onto the declarations to build a
 * contiguous stream suitable to send to the driver.
 */
static void copy_instructions( struct ureg_program *ureg )
{
   unsigned nr_tokens = ureg->domain[DOMAIN_INSN].count;
   /* get_tokens() grows the DECL buffer and returns the write position
    * at its current end; the INSN buffer itself is not touched.
    */
   union tgsi_any_token *out = get_tokens( ureg,
                                           DOMAIN_DECL,
                                           nr_tokens );

   memcpy(out,
          ureg->domain[DOMAIN_INSN].tokens,
          nr_tokens * sizeof out[0] );
}
2050
2051
/**
 * Patch the header's BodySize once all tokens have been emitted.
 * "- 2" excludes the two header tokens themselves (see emit_header).
 */
static void
fixup_header_size(struct ureg_program *ureg)
{
   union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_DECL, 0 );

   out->header.BodySize = ureg->domain[DOMAIN_DECL].count - 2;
}
2059
2060
/**
 * Emit the two-token stream header (header word + processor word) at
 * the start of the declaration domain.  BodySize is filled in later by
 * fixup_header_size().
 */
static void
emit_header( struct ureg_program *ureg )
{
   union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );

   out[0].header.HeaderSize = 2;
   out[0].header.BodySize = 0;

   out[1].processor.Processor = ureg->processor;
   out[1].processor.Padding = 0;
}
2072
2073
/**
 * Assemble the final TGSI token stream: header, declarations, then the
 * buffered instructions, with the header size patched last.
 *
 * \return pointer to the finished token stream (owned by \p ureg), or
 *         NULL if token allocation failed at any point.
 */
const struct tgsi_token *ureg_finalize( struct ureg_program *ureg )
{
   const struct tgsi_token *tokens;

   switch (ureg->processor) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_TESS_EVAL:
      /* Record which stage consumes our outputs; default to the
       * fragment shader when the caller never said.
       */
      ureg_property(ureg, TGSI_PROPERTY_NEXT_SHADER,
                    ureg->next_shader_processor == -1 ?
                       PIPE_SHADER_FRAGMENT :
                       ureg->next_shader_processor);
      break;
   default:
      ; /* nothing */
   }

   emit_header( ureg );
   emit_decls( ureg );
   copy_instructions( ureg );
   fixup_header_size( ureg );

   /* error_tokens is the sentinel get_tokens() installs on OOM. */
   if (ureg->domain[0].tokens == error_tokens ||
       ureg->domain[1].tokens == error_tokens) {
      debug_printf("%s: error in generated shader\n", __FUNCTION__);
      assert(0);
      return NULL;
   }

   tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;

   /* Flip to 1 for ad-hoc debugging of the generated stream. */
   if (0) {
      debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
                   ureg->domain[DOMAIN_DECL].count);
      tgsi_dump( tokens, 0 );
   }

#if DEBUG
   /* tgsi_sanity doesn't seem to return if there are too many constants. */
   bool too_many_constants = false;
   for (unsigned i = 0; i < ARRAY_SIZE(ureg->const_decls); i++) {
      for (unsigned j = 0; j < ureg->const_decls[i].nr_constant_ranges; j++) {
         if (ureg->const_decls[i].constant_range[j].last > 4096) {
            too_many_constants = true;
            break;
         }
      }
   }

   if (tokens && !too_many_constants && !tgsi_sanity_check(tokens)) {
      debug_printf("tgsi_ureg.c, sanity check failed on generated tokens:\n");
      tgsi_dump(tokens, 0);
      assert(0);
   }
#endif


   return tokens;
}
2132
2133
/**
 * Finalize the program and create a driver shader CSO from it.
 *
 * \param so  optional stream-output info copied into the shader state
 * \return the driver shader object, or NULL on finalize failure or for
 *         processors with no pipe create hook here (e.g. compute).
 */
void *ureg_create_shader( struct ureg_program *ureg,
                          struct pipe_context *pipe,
                          const struct pipe_stream_output_info *so )
{
   struct pipe_shader_state state = {0};

   pipe_shader_state_from_tgsi(&state, ureg_finalize(ureg));
   if(!state.tokens)
      return NULL;

   if (so)
      state.stream_output = *so;

   switch (ureg->processor) {
   case PIPE_SHADER_VERTEX:
      return pipe->create_vs_state(pipe, &state);
   case PIPE_SHADER_TESS_CTRL:
      return pipe->create_tcs_state(pipe, &state);
   case PIPE_SHADER_TESS_EVAL:
      return pipe->create_tes_state(pipe, &state);
   case PIPE_SHADER_GEOMETRY:
      return pipe->create_gs_state(pipe, &state);
   case PIPE_SHADER_FRAGMENT:
      return pipe->create_fs_state(pipe, &state);
   default:
      return NULL;
   }
}
2162
2163
/**
 * Finalize the program and detach the token stream from it.
 *
 * Ownership of the returned tokens transfers to the caller, who must
 * release them with ureg_free_tokens(); the DECL domain is reset so
 * ureg_destroy() won't free them again.
 *
 * \param nr_tokens  if non-NULL, receives the token count
 */
const struct tgsi_token *ureg_get_tokens( struct ureg_program *ureg,
                                          unsigned *nr_tokens )
{
   const struct tgsi_token *tokens;

   ureg_finalize(ureg);

   tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;

   if (nr_tokens)
      *nr_tokens = ureg->domain[DOMAIN_DECL].count;

   /* Detach: the program no longer owns the buffer. */
   ureg->domain[DOMAIN_DECL].tokens = 0;
   ureg->domain[DOMAIN_DECL].size = 0;
   ureg->domain[DOMAIN_DECL].order = 0;
   ureg->domain[DOMAIN_DECL].count = 0;

   return tokens;
}
2183
2184
/** Release a token stream obtained from ureg_get_tokens(). */
void ureg_free_tokens( const struct tgsi_token *tokens )
{
   /* Cast away const: the caller owns the buffer. */
   FREE((struct tgsi_token *)tokens);
}
2189
2190
/**
 * Create a ureg program without a screen; range declarations for
 * inputs/outputs will not be used (see ureg_create_with_screen).
 */
struct ureg_program *
ureg_create(enum pipe_shader_type processor)
{
   return ureg_create_with_screen(processor, NULL);
}
2196
2197
/**
 * Create a ureg program for the given shader stage.
 *
 * \param screen  may be NULL; when given, it is queried for
 *                ANY_INOUT_DECL_RANGE support.
 * \return the new program, or NULL on allocation failure (all partial
 *         allocations are released via the goto cleanup chain).
 */
struct ureg_program *
ureg_create_with_screen(enum pipe_shader_type processor,
                        struct pipe_screen *screen)
{
   uint i;
   struct ureg_program *ureg = CALLOC_STRUCT( ureg_program );
   if (!ureg)
      goto no_ureg;

   ureg->processor = processor;
   ureg->supports_any_inout_decl_range =
      screen &&
      screen->get_shader_param(screen, processor,
                               PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE) != 0;
   /* -1 = "next stage unknown"; ureg_finalize() defaults it to FS. */
   ureg->next_shader_processor = -1;

   /* ~0 marks a property as unset (see emit_decls). */
   for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
      ureg->properties[i] = ~0;

   ureg->free_temps = util_bitmask_create();
   if (ureg->free_temps == NULL)
      goto no_free_temps;

   ureg->local_temps = util_bitmask_create();
   if (ureg->local_temps == NULL)
      goto no_local_temps;

   ureg->decl_temps = util_bitmask_create();
   if (ureg->decl_temps == NULL)
      goto no_decl_temps;

   return ureg;

   /* Unwind in reverse order of acquisition. */
no_decl_temps:
   util_bitmask_destroy(ureg->local_temps);
no_local_temps:
   util_bitmask_destroy(ureg->free_temps);
no_free_temps:
   FREE(ureg);
no_ureg:
   return NULL;
}
2240
2241
/**
 * Record which shader stage consumes this program's outputs; emitted as
 * TGSI_PROPERTY_NEXT_SHADER by ureg_finalize() for VS/TES.
 */
void
ureg_set_next_shader_processor(struct ureg_program *ureg, unsigned processor)
{
   ureg->next_shader_processor = processor;
}
2247
2248
2249 unsigned
2250 ureg_get_nr_outputs( const struct ureg_program *ureg )
2251 {
2252 if (!ureg)
2253 return 0;
2254 return ureg->nr_outputs;
2255 }
2256
/**
 * Emit clip/cull distance count properties from shader_info; a zero
 * count leaves the property unset.
 */
static void
ureg_setup_clipdist_info(struct ureg_program *ureg,
                         const struct shader_info *info)
{
   if (info->clip_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
                    info->clip_distance_array_size);
   if (info->cull_distance_array_size)
      ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
                    info->cull_distance_array_size);
}
2268
/** Emit TCS properties (output patch vertex count) from shader_info. */
static void
ureg_setup_tess_ctrl_shader(struct ureg_program *ureg,
                            const struct shader_info *info)
{
   ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
                 info->tess.tcs_vertices_out);
}
2276
/**
 * Emit TES properties (primitive mode, spacing, winding, point mode)
 * from shader_info, translating GLSL conventions to TGSI ones.
 */
static void
ureg_setup_tess_eval_shader(struct ureg_program *ureg,
                            const struct shader_info *info)
{
   /* TGSI uses GL_LINES where GLSL says isolines. */
   if (info->tess.primitive_mode == GL_ISOLINES)
      ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
   else
      ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
                    info->tess.primitive_mode);

   /* The "+1 mod 3" below remaps mesa spacing enums onto the pipe
    * enums; these asserts pin that relationship.
    */
   STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
   STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
                 PIPE_TESS_SPACING_FRACTIONAL_ODD);
   STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
                 PIPE_TESS_SPACING_FRACTIONAL_EVEN);

   ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
                 (info->tess.spacing + 1) % 3);

   /* TGSI stores clockwise; shader_info stores counter-clockwise. */
   ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
                 !info->tess.ccw);
   ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
                 info->tess.point_mode);
}
2301
/**
 * Emit GS properties (input/output primitive, max vertices,
 * invocations) from shader_info.
 */
static void
ureg_setup_geometry_shader(struct ureg_program *ureg,
                           const struct shader_info *info)
{
   ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
                 info->gs.input_primitive);
   ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
                 info->gs.output_primitive);
   ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
                 info->gs.vertices_out);
   ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
                 info->gs.invocations);
}
2315
2316 static void
2317 ureg_setup_fragment_shader(struct ureg_program *ureg,
2318 const struct shader_info *info)
2319 {
2320 if (info->fs.early_fragment_tests || info->fs.post_depth_coverage) {
2321 ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);
2322
2323 if (info->fs.post_depth_coverage)
2324 ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
2325 }
2326
2327 if (info->fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
2328 switch (info->fs.depth_layout) {
2329 case FRAG_DEPTH_LAYOUT_ANY:
2330 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2331 TGSI_FS_DEPTH_LAYOUT_ANY);
2332 break;
2333 case FRAG_DEPTH_LAYOUT_GREATER:
2334 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2335 TGSI_FS_DEPTH_LAYOUT_GREATER);
2336 break;
2337 case FRAG_DEPTH_LAYOUT_LESS:
2338 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2339 TGSI_FS_DEPTH_LAYOUT_LESS);
2340 break;
2341 case FRAG_DEPTH_LAYOUT_UNCHANGED:
2342 ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
2343 TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
2344 break;
2345 default:
2346 assert(0);
2347 }
2348 }
2349 }
2350
2351 static void
2352 ureg_setup_compute_shader(struct ureg_program *ureg,
2353 const struct shader_info *info)
2354 {
2355 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
2356 info->cs.local_size[0]);
2357 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
2358 info->cs.local_size[1]);
2359 ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
2360 info->cs.local_size[2]);
2361
2362 if (info->cs.shared_size)
2363 ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
2364 }
2365
/**
 * Initialize ureg properties from a shader_info: common properties
 * first, then the stage-specific ones.
 */
void
ureg_setup_shader_info(struct ureg_program *ureg,
                       const struct shader_info *info)
{
   if (info->layer_viewport_relative)
      ureg_property(ureg, TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE, 1);

   switch (info->stage) {
   case MESA_SHADER_VERTEX:
      ureg_setup_clipdist_info(ureg, info);
      break;
   case MESA_SHADER_TESS_CTRL:
      ureg_setup_tess_ctrl_shader(ureg, info);
      break;
   case MESA_SHADER_TESS_EVAL:
      ureg_setup_tess_eval_shader(ureg, info);
      ureg_setup_clipdist_info(ureg, info);
      ureg_set_next_shader_processor(ureg, pipe_shader_type_from_mesa(info->next_stage));
      break;
   case MESA_SHADER_GEOMETRY:
      ureg_setup_geometry_shader(ureg, info);
      ureg_setup_clipdist_info(ureg, info);
      break;
   case MESA_SHADER_FRAGMENT:
      ureg_setup_fragment_shader(ureg, info);
      break;
   case MESA_SHADER_COMPUTE:
      ureg_setup_compute_shader(ureg, info);
      break;
   default:
      break;
   }
}
2399
2400
2401 void ureg_destroy( struct ureg_program *ureg )
2402 {
2403 unsigned i;
2404
2405 for (i = 0; i < ARRAY_SIZE(ureg->domain); i++) {
2406 if (ureg->domain[i].tokens &&
2407 ureg->domain[i].tokens != error_tokens)
2408 FREE(ureg->domain[i].tokens);
2409 }
2410
2411 util_bitmask_destroy(ureg->free_temps);
2412 util_bitmask_destroy(ureg->local_temps);
2413 util_bitmask_destroy(ureg->decl_temps);
2414
2415 FREE(ureg);
2416 }