/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/format_srgb.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_lowering.h"

#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif
        struct vc4_uncompiled_shader *shader_state;
        enum pipe_format format;
        unsigned compare_mode:1;
        unsigned compare_func:3;
        } tex[VC4_MAX_TEXTURE_SAMPLERS];

        enum pipe_format color_format;
        bool stencil_full_writemasks;
        bool point_coord_upper_left;
        uint8_t alpha_test_func;
        uint32_t point_sprite_mask;
        struct pipe_rt_blend_state blend;

        /* This is a proxy for the array of FS input semantics, which is
         * larger than we would want to put in the key.
         */
        uint64_t compiled_fs_id;
        enum pipe_format attr_formats[8];
        bool per_vertex_point_size;
resize_qreg_array(struct vc4_compile *c,
        if (*size >= decl_size)

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
                fprintf(stderr, "Malloc failure\n");

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
add_uniform(struct vc4_compile *c,
            enum quniform_contents contents,
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return (struct qreg) { QFILE_UNIF, i };

        uint32_t uniform = c->num_uniforms++;
        struct qreg u = { QFILE_UNIF, uniform };

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;
get_temp_for_uniform(struct vc4_compile *c, enum quniform_contents contents,
        struct qreg u = add_uniform(c, contents, data);
        struct qreg t = qir_MOV(c, u);

qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
        return get_temp_for_uniform(c, QUNIFORM_CONSTANT, ui);

qir_uniform_f(struct vc4_compile *c, float f)
        return qir_uniform_ui(c, fui(f));
indirect_uniform_load(struct vc4_compile *c,
                      struct tgsi_full_src_register *src, int swiz)
        struct tgsi_ind_register *indirect = &src->Indirect;
        struct vc4_compiler_ubo_range *range = &c->ubo_ranges[indirect->ArrayID];
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;

        assert(src->Register.Indirect);
        assert(indirect->File == TGSI_FILE_ADDRESS);

        struct qreg addr_val = c->addr[indirect->Swizzle];
        struct qreg indirect_offset =
                qir_ADD(c, addr_val, qir_uniform_ui(c,
                                        (src->Register.Index * 16) +
        indirect_offset = qir_MIN(c, indirect_offset, qir_uniform_ui(c, (range->dst_offset +

        qir_TEX_DIRECT(c, indirect_offset, add_uniform(c, QUNIFORM_UBO_ADDR, 0));
        struct qreg r4 = qir_TEX_RESULT(c);
        c->num_texture_samples++;
        return qir_MOV(c, r4);
get_src(struct vc4_compile *c, unsigned tgsi_op,
        struct tgsi_full_src_register *full_src, int i)
        struct tgsi_src_register *src = &full_src->Register;
        struct qreg r = c->undef;

        case TGSI_FILE_TEMPORARY:
                r = c->temps[src->Index * 4 + s];
        case TGSI_FILE_IMMEDIATE:
                r = c->consts[src->Index * 4 + s];
        case TGSI_FILE_CONSTANT:
                        r = indirect_uniform_load(c, full_src, s);
                        r = get_temp_for_uniform(c, QUNIFORM_UNIFORM,
        case TGSI_FILE_INPUT:
                r = c->inputs[src->Index * 4 + s];
        case TGSI_FILE_SAMPLER:
        case TGSI_FILE_SAMPLER_VIEW:
                fprintf(stderr, "unknown src file %d\n", src->File);

                r = qir_FMAXABS(c, r, r);

                switch (tgsi_opcode_infer_src_type(tgsi_op)) {
                case TGSI_TYPE_SIGNED:
                case TGSI_TYPE_UNSIGNED:
                        r = qir_SUB(c, qir_uniform_ui(c, 0), r);
                        r = qir_FSUB(c, qir_uniform_f(c, 0.0), r);
update_dst(struct vc4_compile *c, struct tgsi_full_instruction *tgsi_inst,
           int i, struct qreg val)
        struct tgsi_dst_register *tgsi_dst = &tgsi_inst->Dst[0].Register;

        assert(!tgsi_dst->Indirect);

        switch (tgsi_dst->File) {
        case TGSI_FILE_TEMPORARY:
                c->temps[tgsi_dst->Index * 4 + i] = val;
        case TGSI_FILE_OUTPUT:
                c->outputs[tgsi_dst->Index * 4 + i] = val;
                c->num_outputs = MAX2(c->num_outputs,
                                      tgsi_dst->Index * 4 + i + 1);
        case TGSI_FILE_ADDRESS:
                assert(tgsi_dst->Index == 0);
                fprintf(stderr, "unknown dst file %d\n", tgsi_dst->File);
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
tgsi_to_qir_alu(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg dst = qir_get_temp(c);
        qir_emit(c, qir_inst4(op, dst,

tgsi_to_qir_scalar(struct vc4_compile *c,
                   struct tgsi_full_instruction *tgsi_inst,
                   enum qop op, struct qreg *src, int i)
        struct qreg dst = qir_get_temp(c);
        qir_emit(c, qir_inst(op, dst,
tgsi_to_qir_rcp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg x = src[0 * 4 + 0];
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
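        /* A sketch of the refinement being applied: starting from the
         * hardware RCP estimate r0 of 1/x, one Newton-Raphson step computes
         * r1 = r0 * (2 - x * r0), roughly doubling the number of correct
         * bits.
         */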
tgsi_to_qir_rsq(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg x = src[0 * 4 + 0];
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_uniform_f(c, 0.5),
                                    qir_FMUL(c, r, r)))));
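        /* Here the Newton-Raphson step for 1/sqrt(x) is
         * r1 = r0 * (1.5 - 0.5 * x * r0 * r0), again roughly doubling the
         * precision of the hardware RSQ estimate.
         */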
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_uniform_f(c, 0.055)),
                                   qir_uniform_f(c, 1.0 / 1.055)),
                           qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
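        /* The piecewise decode being selected here is the standard sRGB one:
         * linear = srgb / 12.92 for srgb <= 0.04045, otherwise
         * linear = ((srgb + 0.055) / 1.055)^2.4; the SF/SEL pair picks the
         * linear-segment result when srgb is below the threshold.
         */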
qir_srgb_encode(struct vc4_compile *c, struct qreg linear)
        struct qreg low = qir_FMUL(c, linear, qir_uniform_f(c, 12.92));
        struct qreg high = qir_FSUB(c,
                                    qir_uniform_f(c, 1.055),
                                    qir_uniform_f(c, 0.41666))),
                                    qir_uniform_f(c, 0.055));

        qir_SF(c, qir_FSUB(c, linear, qir_uniform_f(c, 0.0031308)));
        return qir_SEL_X_Y_NS(c, low, high);
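        /* And the matching encode: srgb = 12.92 * linear for
         * linear <= 0.0031308, else 1.055 * linear^(1/2.4) - 0.055
         * (the 0.41666 constant above approximates 1/2.4).
         */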
tgsi_to_qir_umul(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        struct qreg src0_hi = qir_SHR(c, src[0 * 4 + i],
                                      qir_uniform_ui(c, 16));
        struct qreg src0_lo = qir_AND(c, src[0 * 4 + i],
                                      qir_uniform_ui(c, 0xffff));
        struct qreg src1_hi = qir_SHR(c, src[1 * 4 + i],
                                      qir_uniform_ui(c, 16));
        struct qreg src1_lo = qir_AND(c, src[1 * 4 + i],
                                      qir_uniform_ui(c, 0xffff));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1_lo);
        struct qreg lohi = qir_MUL24(c, src0_lo, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0_lo, src1_lo);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 16)));
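        /* The QPU multiplier only takes 24-bit operands, so the 32-bit
         * product is assembled from 16-bit halves: with a = ah*2^16 + al and
         * b = bh*2^16 + bl, a*b mod 2^32 = al*bl + ((ah*bl + al*bh) << 16).
         * The ah*bh partial product only affects bits 32 and up, so it is
         * dropped.
         */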
tgsi_to_qir_umad(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        return qir_ADD(c, tgsi_to_qir_umul(c, NULL, 0, src, i), src[2 * 4 + i]);

tgsi_to_qir_idiv(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        return qir_FTOI(c, qir_FMUL(c,
                                    qir_ITOF(c, src[0 * 4 + i]),
                                    qir_RCP(c, qir_ITOF(c, src[1 * 4 + i]))));

tgsi_to_qir_ineg(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        return qir_SUB(c, qir_uniform_ui(c, 0), src[0 * 4 + i]);

tgsi_to_qir_seq(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
tgsi_to_qir_sne(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));

tgsi_to_qir_slt(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));

tgsi_to_qir_sge(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));

tgsi_to_qir_fseq(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_fsne(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_fslt(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_fsge(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_useq(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_usne(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_islt(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));

tgsi_to_qir_isge(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
tgsi_to_qir_cmp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, src[0 * 4 + i]);
        return qir_SEL_X_Y_NS(c,

tgsi_to_qir_ucmp(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        qir_SF(c, src[0 * 4 + i]);
        return qir_SEL_X_Y_ZC(c,
tgsi_to_qir_mad(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)

tgsi_to_qir_lrp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg src0 = src[0 * 4 + i];
        struct qreg src1 = src[1 * 4 + i];
        struct qreg src2 = src[2 * 4 + i];

        /* src0 * src1 + (1 - src0) * src2.
         * -> src0 * src1 + src2 - src0 * src2
         * -> src2 + src0 * (src1 - src2)
         */
        return qir_FADD(c, src2, qir_FMUL(c, src0, qir_FSUB(c, src1, src2)));
tgsi_to_qir_tex(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src)
        assert(!tgsi_inst->Instruction.Saturate);

        struct qreg s = src[0 * 4 + 0];
        struct qreg t = src[0 * 4 + 1];
        struct qreg r = src[0 * 4 + 2];
        uint32_t unit = tgsi_inst->Src[1].Register.Index;
        bool is_txl = tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL;

        struct qreg proj = c->undef;
        if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
                proj = qir_RCP(c, src[0 * 4 + 3]);
                s = qir_FMUL(c, s, proj);
                t = qir_FMUL(c, t, proj);

        struct qreg texture_u[] = {
                add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                add_uniform(c, QUNIFORM_CONSTANT, 0),
                add_uniform(c, QUNIFORM_CONSTANT, 0),
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_RECT ||
            tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
                get_temp_for_uniform(c,
                                     QUNIFORM_TEXRECT_SCALE_X,
                get_temp_for_uniform(c,
                                     QUNIFORM_TEXRECT_SCALE_Y,

        if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
            tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
                texture_u[2] = add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));

        if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
            tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, get_temp_for_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_FMIN(c, qir_FMAX(c, s, qir_uniform_f(c, 0.0)),
                             qir_uniform_f(c, 1.0));

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_FMIN(c, qir_FMAX(c, t, qir_uniform_f(c, 0.0)),
                             qir_uniform_f(c, 1.0));

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB ||
            tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL)
                qir_TEX_B(c, src[0 * 4 + 3], texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg r4 = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg unpacked[4];
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg depthf = qir_ITOF(c, qir_SHR(c, r4,
                                                         qir_uniform_ui(c, 8)));
                struct qreg normalized = qir_FMUL(c, depthf,
                                                  qir_uniform_f(c, 1.0f/0xffffff));

                struct qreg depth_output;

                struct qreg one = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        struct qreg compare = src[0 * 4 + 2];

                        if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                        case PIPE_FUNC_ALWAYS:
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZS(c, one);
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZC(c, one);
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NC(c, one);
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NS(c, one);
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NC(c, one);
                        depth_output = normalized;

                for (int i = 0; i < 4; i++)
                        unpacked[i] = depth_output;
                for (int i = 0; i < 4; i++)
                        unpacked[i] = qir_R4_UNPACK(c, r4, i);

        const uint8_t *format_swiz = vc4_get_format_swizzle(format);
        struct qreg texture_output[4];
        for (int i = 0; i < 4; i++) {
                texture_output[i] = get_swizzled_channel(c, unpacked,

        if (util_format_is_srgb(format)) {
                for (int i = 0; i < 3; i++)
                        texture_output[i] = qir_srgb_decode(c,

        for (int i = 0; i < 4; i++) {
                if (!(tgsi_inst->Dst[0].Register.WriteMask & (1 << i)))
                update_dst(c, tgsi_inst, i,
                           get_swizzled_channel(c, texture_output,
                                                c->key->tex[unit].swizzle[i]));
tgsi_to_qir_trunc(struct vc4_compile *c,
                  struct tgsi_full_instruction *tgsi_inst,
                  enum qop op, struct qreg *src, int i)
        return qir_ITOF(c, qir_FTOI(c, src[0 * 4 + i]));

/*
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
tgsi_to_qir_frc(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src[0 * 4 + i]));
        struct qreg diff = qir_FSUB(c, src[0 * 4 + i], trunc);

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
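        /* Example of why the fixup is needed: for x = -1.3, FTOI/ITOF gives
         * trunc = -1.0, so diff = -0.3.  diff is negative, so the selected
         * result is diff + 1.0 = 0.7, which is x - floor(x).
         */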
/*
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
tgsi_to_qir_flr(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src[0 * 4 + i]));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], trunc));

        return qir_SEL_X_Y_NS(c,
                              qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),

/*
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
tgsi_to_qir_ceil(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src[0 * 4 + i]));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src[0 * 4 + i]));

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
tgsi_to_qir_abs(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg arg = src[0 * 4 + i];
        return qir_FMAXABS(c, arg, arg);

/* Note that this instruction replicates its result from the x channel */
tgsi_to_qir_sin(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
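                /* These are the (2*pi)^n / n! Taylor coefficients for sine,
                 * evaluated below on x/(2*pi) wrapped into [-0.5, 0.5); the
                 * half-period offset introduced by the wrap is why the usual
                 * alternating signs of the series appear flipped here.
                 */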
        struct qreg scaled_x =
                             qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x = qir_FADD(c,
                                 tgsi_to_qir_frc(c, NULL, 0, &scaled_x, 0),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                                          qir_uniform_f(c, coeff[i])));
/* Note that this instruction replicates its result from the x channel */
tgsi_to_qir_cos(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),

        struct qreg scaled_x =
                qir_FMUL(c, src[0 * 4 + 0],
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      tgsi_to_qir_frc(c, NULL, 0, &scaled_x, 0),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
tgsi_to_qir_clamp(struct vc4_compile *c,
                  struct tgsi_full_instruction *tgsi_inst,
                  enum qop op, struct qreg *src, int i)
        return qir_FMAX(c, qir_FMIN(c,

tgsi_to_qir_ssg(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        qir_SF(c, src[0 * 4 + i]);
        return qir_SEL_X_Y_NC(c,
                              qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
                              qir_uniform_f(c, -1.0));
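        /* With the flags set from the source value, the outer SEL picks its
         * first argument when the value is non-negative and -1.0 when it is
         * negative; the inner SEL yields 1.0 for a nonzero value and 0.0 for
         * zero, so the result is the usual sign() of the operand.
         */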
/* Compare to tgsi_to_qir_flr() for the floor logic. */
tgsi_to_qir_arl(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
        struct qreg trunc = qir_FTOI(c, src[0 * 4 + i]);
        struct qreg scaled = qir_SHL(c, trunc, qir_uniform_ui(c, 4));

        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], qir_ITOF(c, trunc)));

        return qir_SEL_X_Y_NS(c, qir_SUB(c, scaled, qir_uniform_ui(c, 4)),

tgsi_to_qir_uarl(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
        return qir_SHL(c, src[0 * 4 + i], qir_uniform_ui(c, 4));
get_channel_from_vpm(struct vc4_compile *c,
                     struct qreg *vpm_reads,
                     const struct util_format_description *desc)
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];

        if (swiz > UTIL_FORMAT_SWIZZLE_W)
                return get_swizzled_channel(c, vpm_reads, swiz);
        else if (chan->size == 32 &&
                 chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return get_swizzled_channel(c, vpm_reads, swiz);
        } else if (chan->size == 32 &&
                   chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        qir_ITOF(c, vpm_reads[swiz]),
                        return qir_ITOF(c, vpm_reads[swiz]);
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
                        if (chan->normalized) {
                                return qir_FSUB(c, qir_FMUL(c,
                                                            qir_UNPACK_8_F(c, temp, swiz),
                                                            qir_uniform_f(c, 2.0)),
                                                qir_uniform_f(c, 1.0));
                                qir_UNPACK_8_I(c, temp,
                                qir_uniform_f(c, -128.0));
                        if (chan->normalized) {
                                return qir_UNPACK_8_F(c, vpm, swiz);
                                return qir_ITOF(c, qir_UNPACK_8_I(c, vpm, swiz));
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                struct qreg vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = qir_ITOF(c, qir_UNPACK_16_I(c, vpm, swiz % 2));
                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1/32768.0f));

                        /* UNPACK_16I sign-extends, so we have to emit ANDs. */
                        if (swiz == 1 || swiz == 3)
                                temp = qir_UNPACK_16_I(c, temp, 1);
                        temp = qir_AND(c, temp, qir_uniform_ui(c, 0xffff));
                        temp = qir_ITOF(c, temp);

                        if (chan->normalized) {
                                return qir_FMUL(c, temp,
                                                qir_uniform_f(c, 1 / 65535.0));
emit_vertex_input(struct vc4_compile *c, int attr)
        enum pipe_format format = c->vs_key->attr_formats[attr];
        struct qreg vpm_reads[4];

        /* Right now, we're setting the VPM offsets to be 16 bytes wide every
         * time, so we always read 4 32-bit VPM entries.
         */
        for (int i = 0; i < 4; i++) {
                vpm_reads[i] = qir_get_temp(c);
                qir_emit(c, qir_inst(QOP_VPM_READ,

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                struct qreg result = get_channel_from_vpm(c, vpm_reads,

                if (result.file == QFILE_NULL) {
                        if (!format_warned) {
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        result = qir_uniform_f(c, 0.0);
                c->inputs[attr * 4 + i] = result;
tgsi_to_qir_kill_if(struct vc4_compile *c, struct qreg *src, int i)
        if (c->discard.file == QFILE_NULL)
                c->discard = qir_uniform_f(c, 0.0);
        qir_SF(c, src[0 * 4 + i]);
        c->discard = qir_SEL_X_Y_NS(c, qir_uniform_f(c, 1.0),
emit_fragcoord_input(struct vc4_compile *c, int attr)
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_ITOF(c, qir_FRAG_Z(c)),
                qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
emit_point_coord_input(struct vc4_compile *c, int attr)
        if (c->point_x.file == QFILE_NULL) {
                c->point_x = qir_uniform_f(c, 0.0);
                c->point_y = qir_uniform_f(c, 0.0);

        c->inputs[attr * 4 + 0] = c->point_x;
        if (c->fs_key->point_coord_upper_left) {
                c->inputs[attr * 4 + 1] = qir_FSUB(c,
                                                   qir_uniform_f(c, 1.0),
                c->inputs[attr * 4 + 1] = c->point_y;
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
emit_fragment_varying(struct vc4_compile *c, uint8_t semantic,
                      uint8_t index, uint8_t swizzle)
        uint32_t i = c->num_input_semantics++;
        struct qreg vary = {

        if (c->num_input_semantics >= c->input_semantics_array_size) {
                c->input_semantics_array_size =
                        MAX2(4, c->input_semantics_array_size * 2);

                c->input_semantics = reralloc(c, c->input_semantics,
                                              struct vc4_varying_semantic,
                                              c->input_semantics_array_size);

        c->input_semantics[i].semantic = semantic;
        c->input_semantics[i].index = index;
        c->input_semantics[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
emit_fragment_input(struct vc4_compile *c, int attr,
                    struct tgsi_full_declaration *decl)
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c,
                                              decl->Semantic.Name,
                                              decl->Semantic.Index,
emit_face_input(struct vc4_compile *c, int attr)
        c->inputs[attr * 4 + 0] = qir_FSUB(c,
                                           qir_uniform_f(c, 1.0),
                                           qir_ITOF(c, qir_FRAG_REV_FLAG(c)),
                                           qir_uniform_f(c, 2.0)));
        c->inputs[attr * 4 + 1] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 2] = qir_uniform_f(c, 0.0);
        c->inputs[attr * 4 + 3] = qir_uniform_f(c, 1.0);
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t semantic_name,
           uint8_t semantic_index,
           uint8_t semantic_swizzle)
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,

        if (old_array_size != c->outputs_array_size) {
                c->output_semantics = reralloc(c,
                                               c->output_semantics,
                                               struct vc4_varying_semantic,
                                               c->outputs_array_size);

        c->output_semantics[decl_offset].semantic = semantic_name;
        c->output_semantics[decl_offset].index = semantic_index;
        c->output_semantics[decl_offset].swizzle = semantic_swizzle;
add_array_info(struct vc4_compile *c, uint32_t array_id,
               uint32_t start, uint32_t size)
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
emit_tgsi_declaration(struct vc4_compile *c,
                      struct tgsi_full_declaration *decl)
        switch (decl->Declaration.File) {
        case TGSI_FILE_TEMPORARY: {
                uint32_t old_size = c->temps_array_size;
                resize_qreg_array(c, &c->temps, &c->temps_array_size,
                                  (decl->Range.Last + 1) * 4);

                for (int i = old_size; i < c->temps_array_size; i++)
                        c->temps[i] = qir_uniform_ui(c, 0);

        case TGSI_FILE_INPUT:
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (decl->Range.Last + 1) * 4);

                for (int i = decl->Range.First;
                     i <= decl->Range.Last;
                        if (c->stage == QSTAGE_FRAG) {
                                if (decl->Semantic.Name ==
                                    TGSI_SEMANTIC_POSITION) {
                                        emit_fragcoord_input(c, i);
                                } else if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
                                        emit_face_input(c, i);
                                } else if (decl->Semantic.Name == TGSI_SEMANTIC_GENERIC &&
                                           (c->fs_key->point_sprite_mask &
                                            (1 << decl->Semantic.Index))) {
                                        emit_point_coord_input(c, i);
                                        emit_fragment_input(c, i, decl);
                                emit_vertex_input(c, i);

        case TGSI_FILE_OUTPUT: {
                for (int i = 0; i < 4; i++) {
                                   decl->Range.First * 4 + i,
                                   decl->Semantic.Name,
                                   decl->Semantic.Index,

                switch (decl->Semantic.Name) {
                case TGSI_SEMANTIC_POSITION:
                        c->output_position_index = decl->Range.First * 4;
                case TGSI_SEMANTIC_CLIPVERTEX:
                        c->output_clipvertex_index = decl->Range.First * 4;
                case TGSI_SEMANTIC_COLOR:
                        c->output_color_index = decl->Range.First * 4;
                case TGSI_SEMANTIC_PSIZE:
                        c->output_point_size_index = decl->Range.First * 4;

        case TGSI_FILE_CONSTANT:
                               decl->Array.ArrayID,
                               decl->Range.First * 16,
                               decl->Range.First + 1) * 16);
emit_tgsi_instruction(struct vc4_compile *c,
                      struct tgsi_full_instruction *tgsi_inst)
        static const struct {
                struct qreg (*func)(struct vc4_compile *c,
                                    struct tgsi_full_instruction *tgsi_inst,
                                    struct qreg *src, int i);
                [TGSI_OPCODE_MOV] = { QOP_MOV, tgsi_to_qir_alu },
                [TGSI_OPCODE_ABS] = { 0, tgsi_to_qir_abs },
                [TGSI_OPCODE_MUL] = { QOP_FMUL, tgsi_to_qir_alu },
                [TGSI_OPCODE_ADD] = { QOP_FADD, tgsi_to_qir_alu },
                [TGSI_OPCODE_SUB] = { QOP_FSUB, tgsi_to_qir_alu },
                [TGSI_OPCODE_MIN] = { QOP_FMIN, tgsi_to_qir_alu },
                [TGSI_OPCODE_MAX] = { QOP_FMAX, tgsi_to_qir_alu },
                [TGSI_OPCODE_F2I] = { QOP_FTOI, tgsi_to_qir_alu },
                [TGSI_OPCODE_I2F] = { QOP_ITOF, tgsi_to_qir_alu },
                [TGSI_OPCODE_UADD] = { QOP_ADD, tgsi_to_qir_alu },
                [TGSI_OPCODE_USHR] = { QOP_SHR, tgsi_to_qir_alu },
                [TGSI_OPCODE_ISHR] = { QOP_ASR, tgsi_to_qir_alu },
                [TGSI_OPCODE_SHL] = { QOP_SHL, tgsi_to_qir_alu },
                [TGSI_OPCODE_IMIN] = { QOP_MIN, tgsi_to_qir_alu },
                [TGSI_OPCODE_IMAX] = { QOP_MAX, tgsi_to_qir_alu },
                [TGSI_OPCODE_AND] = { QOP_AND, tgsi_to_qir_alu },
                [TGSI_OPCODE_OR] = { QOP_OR, tgsi_to_qir_alu },
                [TGSI_OPCODE_XOR] = { QOP_XOR, tgsi_to_qir_alu },
                [TGSI_OPCODE_NOT] = { QOP_NOT, tgsi_to_qir_alu },

                [TGSI_OPCODE_UMUL] = { 0, tgsi_to_qir_umul },
                [TGSI_OPCODE_UMAD] = { 0, tgsi_to_qir_umad },
                [TGSI_OPCODE_IDIV] = { 0, tgsi_to_qir_idiv },
                [TGSI_OPCODE_INEG] = { 0, tgsi_to_qir_ineg },

                [TGSI_OPCODE_SEQ] = { 0, tgsi_to_qir_seq },
                [TGSI_OPCODE_SNE] = { 0, tgsi_to_qir_sne },
                [TGSI_OPCODE_SGE] = { 0, tgsi_to_qir_sge },
                [TGSI_OPCODE_SLT] = { 0, tgsi_to_qir_slt },
                [TGSI_OPCODE_FSEQ] = { 0, tgsi_to_qir_fseq },
                [TGSI_OPCODE_FSNE] = { 0, tgsi_to_qir_fsne },
                [TGSI_OPCODE_FSGE] = { 0, tgsi_to_qir_fsge },
                [TGSI_OPCODE_FSLT] = { 0, tgsi_to_qir_fslt },
                [TGSI_OPCODE_USEQ] = { 0, tgsi_to_qir_useq },
                [TGSI_OPCODE_USNE] = { 0, tgsi_to_qir_usne },
                [TGSI_OPCODE_ISGE] = { 0, tgsi_to_qir_isge },
                [TGSI_OPCODE_ISLT] = { 0, tgsi_to_qir_islt },

                [TGSI_OPCODE_CMP] = { 0, tgsi_to_qir_cmp },
                [TGSI_OPCODE_UCMP] = { 0, tgsi_to_qir_ucmp },
                [TGSI_OPCODE_MAD] = { 0, tgsi_to_qir_mad },
                [TGSI_OPCODE_RCP] = { QOP_RCP, tgsi_to_qir_rcp },
                [TGSI_OPCODE_RSQ] = { QOP_RSQ, tgsi_to_qir_rsq },
                [TGSI_OPCODE_EX2] = { QOP_EXP2, tgsi_to_qir_scalar },
                [TGSI_OPCODE_LG2] = { QOP_LOG2, tgsi_to_qir_scalar },
                [TGSI_OPCODE_LRP] = { 0, tgsi_to_qir_lrp },
                [TGSI_OPCODE_TRUNC] = { 0, tgsi_to_qir_trunc },
                [TGSI_OPCODE_CEIL] = { 0, tgsi_to_qir_ceil },
                [TGSI_OPCODE_FRC] = { 0, tgsi_to_qir_frc },
                [TGSI_OPCODE_FLR] = { 0, tgsi_to_qir_flr },
                [TGSI_OPCODE_SIN] = { 0, tgsi_to_qir_sin },
                [TGSI_OPCODE_COS] = { 0, tgsi_to_qir_cos },
                [TGSI_OPCODE_CLAMP] = { 0, tgsi_to_qir_clamp },
                [TGSI_OPCODE_SSG] = { 0, tgsi_to_qir_ssg },
                [TGSI_OPCODE_ARL] = { 0, tgsi_to_qir_arl },
                [TGSI_OPCODE_UARL] = { 0, tgsi_to_qir_uarl },
        static int asdf = 0;
        uint32_t tgsi_op = tgsi_inst->Instruction.Opcode;

        if (tgsi_op == TGSI_OPCODE_END)

        struct qreg src_regs[12];
        for (int s = 0; s < 3; s++) {
                for (int i = 0; i < 4; i++) {
                        src_regs[4 * s + i] =
                                get_src(c, tgsi_inst->Instruction.Opcode,
                                        &tgsi_inst->Src[s], i);

        case TGSI_OPCODE_TEX:
        case TGSI_OPCODE_TXP:
        case TGSI_OPCODE_TXB:
        case TGSI_OPCODE_TXL:
                tgsi_to_qir_tex(c, tgsi_inst,
                                op_trans[tgsi_op].op, src_regs);
        case TGSI_OPCODE_KILL:
                c->discard = qir_uniform_f(c, 1.0);
        case TGSI_OPCODE_KILL_IF:
                for (int i = 0; i < 4; i++)
                        tgsi_to_qir_kill_if(c, src_regs, i);

        if (tgsi_op > ARRAY_SIZE(op_trans) || !(op_trans[tgsi_op].func)) {
                fprintf(stderr, "unknown tgsi inst: ");
                tgsi_dump_instruction(tgsi_inst, asdf++);
                fprintf(stderr, "\n");

        for (int i = 0; i < 4; i++) {
                if (!(tgsi_inst->Dst[0].Register.WriteMask & (1 << i)))

                result = op_trans[tgsi_op].func(c, tgsi_inst,
                                                op_trans[tgsi_op].op,

                if (tgsi_inst->Instruction.Saturate) {
                        float low = (tgsi_inst->Instruction.Saturate ==
                                     TGSI_SAT_MINUS_PLUS_ONE ? -1.0 : 0.0);
                        result = qir_FMAX(c,
                                          qir_uniform_f(c, 1.0)),
                                          qir_uniform_f(c, low));

                update_dst(c, tgsi_inst, i, result);
parse_tgsi_immediate(struct vc4_compile *c, struct tgsi_full_immediate *imm)
        for (int i = 0; i < 4; i++) {
                unsigned n = c->num_consts++;
                resize_qreg_array(c, &c->consts, &c->consts_array_size, n + 1);
                c->consts[n] = qir_uniform_ui(c, imm->u[i].Uint);
vc4_blend_channel(struct vc4_compile *c,
        case PIPE_BLENDFACTOR_ONE:
        case PIPE_BLENDFACTOR_SRC_COLOR:
                return qir_FMUL(c, val, src[channel]);
        case PIPE_BLENDFACTOR_SRC_ALPHA:
                return qir_FMUL(c, val, src[3]);
        case PIPE_BLENDFACTOR_DST_ALPHA:
                return qir_FMUL(c, val, dst[3]);
        case PIPE_BLENDFACTOR_DST_COLOR:
                return qir_FMUL(c, val, dst[channel]);
        case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
                                        qir_uniform_f(c, 1.0),
        case PIPE_BLENDFACTOR_CONST_COLOR:
                return qir_FMUL(c, val,
                                get_temp_for_uniform(c,
                                                     QUNIFORM_BLEND_CONST_COLOR,
        case PIPE_BLENDFACTOR_CONST_ALPHA:
                return qir_FMUL(c, val,
                                get_temp_for_uniform(c,
                                                     QUNIFORM_BLEND_CONST_COLOR,
        case PIPE_BLENDFACTOR_ZERO:
                return qir_uniform_f(c, 0.0);
        case PIPE_BLENDFACTOR_INV_SRC_COLOR:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
        case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
        case PIPE_BLENDFACTOR_INV_DST_ALPHA:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
        case PIPE_BLENDFACTOR_INV_DST_COLOR:
                return qir_FMUL(c, val, qir_FSUB(c, qir_uniform_f(c, 1.0),
        case PIPE_BLENDFACTOR_INV_CONST_COLOR:
                return qir_FMUL(c, val,
                                qir_FSUB(c, qir_uniform_f(c, 1.0),
                                         get_temp_for_uniform(c,
                                                              QUNIFORM_BLEND_CONST_COLOR,
        case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
                return qir_FMUL(c, val,
                                qir_FSUB(c, qir_uniform_f(c, 1.0),
                                         get_temp_for_uniform(c,
                                                              QUNIFORM_BLEND_CONST_COLOR,
        case PIPE_BLENDFACTOR_SRC1_COLOR:
        case PIPE_BLENDFACTOR_SRC1_ALPHA:
        case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
        case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
                fprintf(stderr, "Unknown blend factor %d\n", factor);
vc4_blend_func(struct vc4_compile *c,
               struct qreg src, struct qreg dst,
        case PIPE_BLEND_ADD:
                return qir_FADD(c, src, dst);
        case PIPE_BLEND_SUBTRACT:
                return qir_FSUB(c, src, dst);
        case PIPE_BLEND_REVERSE_SUBTRACT:
                return qir_FSUB(c, dst, src);
        case PIPE_BLEND_MIN:
                return qir_FMIN(c, src, dst);
        case PIPE_BLEND_MAX:
                return qir_FMAX(c, src, dst);
                fprintf(stderr, "Unknown blend func %d\n", func);
/*
 * Implements fixed function blending in shader code.
 *
 * VC4 doesn't have any hardware support for blending.  Instead, you read the
 * current contents of the destination from the tile buffer after having
 * waited for the scoreboard (which is handled by vc4_qpu_emit.c), then do
 * math using your output color and that destination value, and update the
 * output color appropriately.
 */
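/*
 * In other words, this is the standard GL blend equation evaluated in the
 * shader: each channel becomes func(src * src_factor, dst * dst_factor),
 * with separate factors for RGB and alpha, as mapped out by
 * vc4_blend_channel() and vc4_blend_func() above.
 */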
vc4_blend(struct vc4_compile *c, struct qreg *result,
          struct qreg *dst_color, struct qreg *src_color)
        struct pipe_rt_blend_state *blend = &c->fs_key->blend;

        if (!blend->blend_enable) {
                for (int i = 0; i < 4; i++)
                        result[i] = src_color[i];

        struct qreg src_blend[4], dst_blend[4];
        for (int i = 0; i < 3; i++) {
                src_blend[i] = vc4_blend_channel(c,
                                                 dst_color, src_color,
                                                 blend->rgb_src_factor, i);
                dst_blend[i] = vc4_blend_channel(c,
                                                 dst_color, src_color,
                                                 blend->rgb_dst_factor, i);
        src_blend[3] = vc4_blend_channel(c,
                                         dst_color, src_color,
                                         blend->alpha_src_factor, 3);
        dst_blend[3] = vc4_blend_channel(c,
                                         dst_color, src_color,
                                         blend->alpha_dst_factor, 3);

        for (int i = 0; i < 3; i++) {
                result[i] = vc4_blend_func(c,
                                           src_blend[i], dst_blend[i],
        result[3] = vc4_blend_func(c,
                                   src_blend[3], dst_blend[3],
clip_distance_discard(struct vc4_compile *c)
        for (int i = 0; i < PIPE_MAX_CLIP_PLANES; i++) {
                if (!(c->key->ucp_enables & (1 << i)))

                struct qreg dist = emit_fragment_varying(c,
                                                         TGSI_SEMANTIC_CLIPDIST,

                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_f(c, 0.0);

                c->discard = qir_SEL_X_Y_NS(c, qir_uniform_f(c, 1.0),
alpha_test_discard(struct vc4_compile *c)
        struct qreg src_alpha;
        struct qreg alpha_ref = get_temp_for_uniform(c, QUNIFORM_ALPHA_REF, 0);

        if (!c->fs_key->alpha_test)

        if (c->output_color_index != -1)
                src_alpha = c->outputs[c->output_color_index + 3];
                src_alpha = qir_uniform_f(c, 1.0);

        if (c->discard.file == QFILE_NULL)
                c->discard = qir_uniform_f(c, 0.0);

        switch (c->fs_key->alpha_test_func) {
        case PIPE_FUNC_NEVER:
                c->discard = qir_uniform_f(c, 1.0);
        case PIPE_FUNC_ALWAYS:
        case PIPE_FUNC_EQUAL:
                qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
                c->discard = qir_SEL_X_Y_ZS(c, c->discard,
                                            qir_uniform_f(c, 1.0));
        case PIPE_FUNC_NOTEQUAL:
                qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
                c->discard = qir_SEL_X_Y_ZC(c, c->discard,
                                            qir_uniform_f(c, 1.0));
        case PIPE_FUNC_GREATER:
                qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
                c->discard = qir_SEL_X_Y_NC(c, c->discard,
                                            qir_uniform_f(c, 1.0));
        case PIPE_FUNC_GEQUAL:
                qir_SF(c, qir_FSUB(c, alpha_ref, src_alpha));
                c->discard = qir_SEL_X_Y_NS(c, c->discard,
                                            qir_uniform_f(c, 1.0));
        case PIPE_FUNC_LESS:
                qir_SF(c, qir_FSUB(c, src_alpha, alpha_ref));
                c->discard = qir_SEL_X_Y_NS(c, c->discard,
                                            qir_uniform_f(c, 1.0));
        case PIPE_FUNC_LEQUAL:
                qir_SF(c, qir_FSUB(c, alpha_ref, src_alpha));
                c->discard = qir_SEL_X_Y_NC(c, c->discard,
                                            qir_uniform_f(c, 1.0));
vc4_logicop(struct vc4_compile *c, struct qreg src, struct qreg dst)
        switch (c->fs_key->logicop_func) {
        case PIPE_LOGICOP_CLEAR:
                return qir_uniform_f(c, 0.0);
        case PIPE_LOGICOP_NOR:
                return qir_NOT(c, qir_OR(c, src, dst));
        case PIPE_LOGICOP_AND_INVERTED:
                return qir_AND(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_COPY_INVERTED:
                return qir_NOT(c, src);
        case PIPE_LOGICOP_AND_REVERSE:
                return qir_AND(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_INVERT:
                return qir_NOT(c, dst);
        case PIPE_LOGICOP_XOR:
                return qir_XOR(c, src, dst);
        case PIPE_LOGICOP_NAND:
                return qir_NOT(c, qir_AND(c, src, dst));
        case PIPE_LOGICOP_AND:
                return qir_AND(c, src, dst);
        case PIPE_LOGICOP_EQUIV:
                return qir_NOT(c, qir_XOR(c, src, dst));
        case PIPE_LOGICOP_NOOP:
        case PIPE_LOGICOP_OR_INVERTED:
                return qir_OR(c, qir_NOT(c, src), dst);
        case PIPE_LOGICOP_OR_REVERSE:
                return qir_OR(c, src, qir_NOT(c, dst));
        case PIPE_LOGICOP_OR:
                return qir_OR(c, src, dst);
        case PIPE_LOGICOP_SET:
                return qir_uniform_ui(c, ~0);
        case PIPE_LOGICOP_COPY:
emit_frag_end(struct vc4_compile *c)
        clip_distance_discard(c);
        alpha_test_discard(c);

        enum pipe_format color_format = c->fs_key->color_format;
        const uint8_t *format_swiz = vc4_get_format_swizzle(color_format);
        struct qreg tlb_read_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg linear_dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
        struct qreg packed_dst_color = c->undef;

        if (c->fs_key->blend.blend_enable ||
            c->fs_key->blend.colormask != 0xf ||
            c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
                struct qreg r4 = qir_TLB_COLOR_READ(c);
                for (int i = 0; i < 4; i++)
                        tlb_read_color[i] = qir_R4_UNPACK(c, r4, i);
                for (int i = 0; i < 4; i++) {
                        dst_color[i] = get_swizzled_channel(c,
                        if (util_format_is_srgb(color_format) && i != 3) {
                                linear_dst_color[i] =
                                        qir_srgb_decode(c, dst_color[i]);
                                linear_dst_color[i] = dst_color[i];

                /* Save the packed value for logic ops.  Can't reuse r4
                 * because other things might smash it (like sRGB).
                 */
                packed_dst_color = qir_MOV(c, r4);

        struct qreg blend_color[4];
        struct qreg undef_array[4] = {
                c->undef, c->undef, c->undef, c->undef
        vc4_blend(c, blend_color, linear_dst_color,
                  (c->output_color_index != -1 ?
                   c->outputs + c->output_color_index :

        if (util_format_is_srgb(color_format)) {
                for (int i = 0; i < 3; i++)
                        blend_color[i] = qir_srgb_encode(c, blend_color[i]);

        /* If the bit isn't set in the color mask, then just return the
         * original dst color, instead.
         */
        for (int i = 0; i < 4; i++) {
                if (!(c->fs_key->blend.colormask & (1 << i))) {
                        blend_color[i] = dst_color[i];

        /* Debug: Sometimes you're getting a black output and just want to see
         * if the FS is getting executed at all.  Spam magenta into the color
         * output.
         */
        blend_color[0] = qir_uniform_f(c, 1.0);
        blend_color[1] = qir_uniform_f(c, 0.0);
        blend_color[2] = qir_uniform_f(c, 1.0);
        blend_color[3] = qir_uniform_f(c, 0.5);

        struct qreg swizzled_outputs[4];
        for (int i = 0; i < 4; i++) {
                swizzled_outputs[i] = get_swizzled_channel(c, blend_color,

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 1));
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 2));

        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                qir_TLB_Z_WRITE(c, z);

        bool color_written = false;
        for (int i = 0; i < 4; i++) {
                if (swizzled_outputs[i].file != QFILE_NULL)
                        color_written = true;

        struct qreg packed_color;
        if (color_written) {
                /* Fill in any undefined colors.  The simulator will assertion
                 * fail if we read something that wasn't written, and I don't
                 * know what hardware does.
                 */
                for (int i = 0; i < 4; i++) {
                        if (swizzled_outputs[i].file == QFILE_NULL)
                                swizzled_outputs[i] = qir_uniform_f(c, 0.0);
                packed_color = qir_get_temp(c);
                qir_emit(c, qir_inst4(QOP_PACK_COLORS, packed_color,
                                      swizzled_outputs[0],
                                      swizzled_outputs[1],
                                      swizzled_outputs[2],
                                      swizzled_outputs[3]));
                packed_color = qir_uniform_ui(c, 0);

        if (c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
                packed_color = vc4_logicop(c, packed_color, packed_dst_color);

        qir_emit(c, qir_inst(QOP_TLB_COLOR_WRITE, c->undef,
                             packed_color, c->undef));
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
        for (int i = 0; i < 2; i++) {
                        add_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
                xyi[i] = qir_FTOI(c, qir_FMUL(c,
                                              c->outputs[c->output_position_index + i],

        qir_VPM_WRITE(c, qir_PACK_SCALED(c, xyi[0], xyi[1]));
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
        struct qreg zscale = add_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = add_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FMUL(c, qir_FADD(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],

emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
        qir_VPM_WRITE(c, rcp_w);
emit_point_size_write(struct vc4_compile *c)
        struct qreg point_size;

        if (c->output_point_size_index)
                point_size = c->outputs[c->output_point_size_index + 3];
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
/*
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
emit_stub_vpm_read(struct vc4_compile *c)
        for (int i = 0; i < 4; i++) {
                qir_emit(c, qir_inst(QOP_VPM_READ,
emit_ucp_clipdistance(struct vc4_compile *c)
        if (c->output_clipvertex_index != -1)
                cv = c->output_clipvertex_index;
        else if (c->output_position_index != -1)
                cv = c->output_position_index;

        for (int plane = 0; plane < PIPE_MAX_CLIP_PLANES; plane++) {
                if (!(c->key->ucp_enables & (1 << plane)))

                /* Pick the next outputs[] that hasn't been written to, since
                 * there are no other program writes left to be processed at
                 * this point.  If something had been declared but not written
                 * (like a w component), we'll just smash over the top of it.
                 */
                uint32_t output_index = c->num_outputs++;
                add_output(c, output_index,
                           TGSI_SEMANTIC_CLIPDIST,

                struct qreg dist = qir_uniform_f(c, 0.0);
                for (int i = 0; i < 4; i++) {
                        struct qreg pos_chan = c->outputs[cv + i];
                                add_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                        dist = qir_FADD(c, dist, qir_FMUL(c, pos_chan, ucp));

                c->outputs[output_index] = dist;
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_semantic *fs_inputs,
              uint32_t num_fs_inputs)
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);
        emit_ucp_clipdistance(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_semantic *input = &fs_inputs[i];

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_semantic *output =
                                &c->output_semantics[j];

                        if (input->semantic == output->semantic &&
                            input->index == output->index &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);

                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
emit_coord_end(struct vc4_compile *c)
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
static struct vc4_compile *
vc4_shader_tgsi_to_qir(struct vc4_context *vc4, enum qstage stage,
                       struct vc4_key *key)
        struct vc4_compile *c = qir_compile_init();

        for (int i = 0; i < 4; i++)
                c->addr[i] = qir_uniform_f(c, 0.0);

        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, ~0, 0);
                c->vs_key = (struct vc4_vs_key *)key;
                c->vs_key = (struct vc4_vs_key *)key;

        const struct tgsi_token *tokens = key->shader_state->base.tokens;
        if (c->fs_key && c->fs_key->light_twoside) {
                if (!key->shader_state->twoside_tokens) {
                        const struct tgsi_lowering_config lowering_config = {
                                .color_two_side = true,
                        struct tgsi_shader_info info;
                        key->shader_state->twoside_tokens =
                                tgsi_transform_lowering(&lowering_config,
                                                        key->shader_state->base.tokens,

                        /* If no transformation occurred, then NULL is
                         * returned and we just use our original tokens.
                         */
                        if (!key->shader_state->twoside_tokens) {
                                key->shader_state->twoside_tokens =
                                        key->shader_state->base.tokens;

                tokens = key->shader_state->twoside_tokens;

        ret = tgsi_parse_init(&c->parser, tokens);
        assert(ret == TGSI_PARSE_OK);

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);

        while (!tgsi_parse_end_of_tokens(&c->parser)) {
                tgsi_parse_token(&c->parser);

                switch (c->parser.FullToken.Token.Type) {
                case TGSI_TOKEN_TYPE_DECLARATION:
                        emit_tgsi_declaration(c,
                                              &c->parser.FullToken.FullDeclaration);
                case TGSI_TOKEN_TYPE_INSTRUCTION:
                        emit_tgsi_instruction(c,
                                              &c->parser.FullToken.FullInstruction);
                case TGSI_TOKEN_TYPE_IMMEDIATE:
                        parse_tgsi_immediate(c,
                                             &c->parser.FullToken.FullImmediate);

                              vc4->prog.fs->input_semantics,
                              vc4->prog.fs->num_inputs);

        tgsi_parse_free(&c->parser);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);

        const struct tgsi_lowering_config lowering_config = {

        struct tgsi_shader_info info;
        so->base.tokens = tgsi_transform_lowering(&lowering_config, cso->tokens, &info);
        if (!so->base.tokens)
                so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

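        /* Cache miss: compile a new variant for this key. */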
        struct vc4_compile *c = vc4_shader_tgsi_to_qir(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_semantics];
                struct simple_node *node;

                memset(input_live, 0, sizeof(input_live));
                foreach(node, &c->instructions) {
                        struct qinst *inst = (struct qinst *)node;
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_semantics = ralloc_array(shader,
                                                       struct vc4_varying_semantic,
                                                       c->num_input_semantics);

                for (int i = 0; i < c->num_input_semantics; i++) {
                        struct vc4_varying_semantic *sem = &c->input_semantics[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (sem->semantic == (uint8_t)~0)
                                continue;

                        if (sem->semantic == TGSI_SEMANTIC_COLOR ||
                            sem->semantic == TGSI_SEMANTIC_BCOLOR) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_semantics[shader->num_inputs] = *sem;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_mem(vc4->screen, c->qpu_insts,
                                      c->qpu_inst_count * sizeof(uint64_t),
                                      "code");

2302 /* Copy the compiler UBO range state to the compiled shader, dropping
2303 * out arrays that were never referenced by an indirect load.
2305 * (Note that QIR dead code elimination of an array access still
2306 * leaves that array alive, though)
2308 if (c
->num_ubo_ranges
) {
2309 shader
->num_ubo_ranges
= c
->num_ubo_ranges
;
2310 shader
->ubo_ranges
= ralloc_array(shader
, struct vc4_ubo_range
,
2313 for (int i
= 0; i
< c
->ubo_ranges_array_size
; i
++) {
2314 struct vc4_compiler_ubo_range
*range
=
2319 shader
->ubo_ranges
[j
].dst_offset
= range
->dst_offset
;
2320 shader
->ubo_ranges
[j
].src_offset
= range
->src_offset
;
2321 shader
->ubo_ranges
[j
].size
= range
->size
;
2322 shader
->ubo_size
+= c
->ubo_ranges
[i
].size
;
        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

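/* Fills in the parts of the key shared between the FS and VS: per-sampler
 * format, swizzle, compare and wrap state, plus the enabled user clip planes.
 */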
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (sampler) {
                        key->tex[i].format = sampler->format;
                        key->tex[i].swizzle[0] = sampler->swizzle_r;
                        key->tex[i].swizzle[1] = sampler->swizzle_g;
                        key->tex[i].swizzle[2] = sampler->swizzle_b;
                        key->tex[i].swizzle[3] = sampler->swizzle_a;
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

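/* Builds the FS key from the current pipe state and fetches (or compiles) the
 * matching variant, flagging flat-shade state dirty when the set of color
 * inputs changed.
 */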
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_PROG))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

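/* Builds the VS key from the current pipe state and compiles (or fetches)
 * variants for both the vertex shader and the coordinate shader stages.
 */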
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_PROG))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        vc4->prog.vs = vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        key->is_coord = true;
        vc4->prog.cs = vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        if (so->twoside_tokens != so->base.tokens)
                free((void *)so->twoside_tokens);
        free((void *)so->base.tokens);
        free(so);
}

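/* Maps a pipe_tex_wrap mode onto the hardware wrap field.  GL-style CLAMP has
 * no direct equivalent, so it picks between two of the supported modes based
 * on whether filtering is nearest.
 */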
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

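/* Emits texture config parameter 0 for a sampler unit into the uniform
 * stream: the relocated texture BO address/offset, mip level count, cube map
 * mode, and the low bits of the texture type.
 */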
static void
write_texture_p0(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_reloc(vc4, &vc4->uniforms, rsc->bo,
                 VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(texture->u.tex.last_level -
                               texture->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(texture->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE) |
                 VC4_SET_FIELD(rsc->vc4_format & 7, VC4_TEX_P0_TYPE));
}

static void
write_texture_p1(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };

        bool either_nearest =
                (sampler->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 sampler->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);

        cl_u32(&vc4->uniforms,
               VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
               VC4_SET_FIELD(texture->texture->height0 & 2047,
                             VC4_TEX_P1_HEIGHT) |
               VC4_SET_FIELD(texture->texture->width0 & 2047,
                             VC4_TEX_P1_WIDTH) |
               VC4_SET_FIELD(magfilter_map[sampler->mag_img_filter],
                             VC4_TEX_P1_MAGFILT) |
               VC4_SET_FIELD(minfilter_map[sampler->min_mip_filter * 2 +
                                           sampler->min_img_filter],
                             VC4_TEX_P1_MINFILT) |
               VC4_SET_FIELD(translate_wrap(sampler->wrap_s, either_nearest),
                             VC4_TEX_P1_WRAP_S) |
               VC4_SET_FIELD(translate_wrap(sampler->wrap_t, either_nearest),
                             VC4_TEX_P1_WRAP_T));
}

static void
write_texture_p2(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t data)
{
        uint32_t unit = data & 0xffff;
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_u32(&vc4->uniforms,
               VC4_SET_FIELD(VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE,
                             VC4_TEX_P2_PTYPE) |
               VC4_SET_FIELD(rsc->cube_map_stride >> 12, VC4_TEX_P2_CMST) |
               VC4_SET_FIELD((data >> 16) & 1, VC4_TEX_P2_BSLOD));
}

#define SWIZ(x,y,z,w) {                  \
        UTIL_FORMAT_SWIZZLE_##x,         \
        UTIL_FORMAT_SWIZZLE_##y,         \
        UTIL_FORMAT_SWIZZLE_##z,         \
        UTIL_FORMAT_SWIZZLE_##w          \
}

static void
write_texture_border_color(struct vc4_context *vc4,
                           struct vc4_texture_stateobj *texstate,
                           uint32_t unit)
{
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        union util_color uc;

        const struct util_format_description *tex_format_desc =
                util_format_description(texture->format);

        float border_color[4];
        for (int i = 0; i < 4; i++)
                border_color[i] = sampler->border_color.f[i];
        if (util_format_is_srgb(texture->format)) {
                for (int i = 0; i < 3; i++)
                        border_color[i] =
                                util_format_linear_to_srgb_float(border_color[i]);
        }

        /* Turn the border color into the layout of channels that it would
         * have when stored as texture contents.
         */
        float storage_color[4];
        util_format_unswizzle_4f(storage_color,
                                 border_color,
                                 tex_format_desc->swizzle);

        /* Now, pack so that when the vc4_format-sampled texture contents are
         * replaced with our border color, the vc4_get_format_swizzle()
         * swizzling will get the right channels.
         */
        if (util_format_is_depth_or_stencil(texture->format)) {
                uc.ui[0] = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                       sampler->border_color.f[0]) << 8;
        } else {
                switch (rsc->vc4_format) {
                default:
                case VC4_TEXTURE_TYPE_RGBA8888:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGBA4444:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_A8B8G8R8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGB565:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_ALPHA:
                        uc.ui[0] = float_to_ubyte(storage_color[0]) << 24;
                        break;
                case VC4_TEXTURE_TYPE_LUMALPHA:
                        uc.ui[0] = ((float_to_ubyte(storage_color[1]) << 24) |
                                    (float_to_ubyte(storage_color[0]) << 0));
                        break;
                }
        }

        cl_u32(&vc4->uniforms, uc.ui[0]);
}

static uint32_t
get_texrect_scale(struct vc4_texture_stateobj *texstate,
                  enum quniform_contents contents,
                  uint32_t data)
{
        struct pipe_sampler_view *texture = texstate->textures[data];
        uint32_t dim;

        if (contents == QUNIFORM_TEXRECT_SCALE_X)
                dim = texture->texture->width0;
        else
                dim = texture->texture->height0;

        return fui(1.0f / dim);
}

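/* Uploads the indirectly addressed uniform ranges to a temporary BO for the
 * shader to load from; returns NULL when the shader has no UBO ranges.
 */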
static struct vc4_bo *
vc4_upload_ubo(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
               const uint32_t *gallium_uniforms)
{
        if (!shader->ubo_size)
                return NULL;

        struct vc4_bo *ubo = vc4_bo_alloc(vc4->screen, shader->ubo_size, "ubo");
        uint32_t *data = vc4_bo_map(ubo);
        for (uint32_t i = 0; i < shader->num_ubo_ranges; i++) {
                memcpy(data + shader->ubo_ranges[i].dst_offset,
                       gallium_uniforms + shader->ubo_ranges[i].src_offset,
                       shader->ubo_ranges[i].size);
        }

        return ubo;
}

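/* Streams this shader's uniform values into the command list in the order
 * recorded at compile time (shader->uniforms), emitting texture config words
 * and relocations where the compiled code expects them.
 */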
void
vc4_write_uniforms(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
                   struct vc4_constbuf_stateobj *cb,
                   struct vc4_texture_stateobj *texstate)
{
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
        struct vc4_bo *ubo = vc4_upload_ubo(vc4, shader, gallium_uniforms);

        cl_ensure_space(&vc4->uniforms, (uinfo->count +
                                         uinfo->num_texture_samples) * 4);

        cl_start_shader_reloc(&vc4->uniforms, uinfo->num_texture_samples);

        for (int i = 0; i < uinfo->count; i++) {

                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_u32(&vc4->uniforms, uinfo->data[i]);
                        break;
                case QUNIFORM_UNIFORM:
                        cl_u32(&vc4->uniforms,
                               gallium_uniforms[uinfo->data[i]]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE:
                        cl_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
                        break;
                case QUNIFORM_VIEWPORT_Y_SCALE:
                        cl_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
                        break;

                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_f(&vc4->uniforms, vc4->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_f(&vc4->uniforms, vc4->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        cl_f(&vc4->uniforms,
                             vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P0:
                        write_texture_p0(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                        write_texture_p1(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P2:
                        write_texture_p2(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_UBO_ADDR:
                        cl_reloc(vc4, &vc4->uniforms, ubo, 0);
                        break;

                case QUNIFORM_TEXTURE_BORDER_COLOR:
                        write_texture_border_color(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_u32(&vc4->uniforms,
                               get_texrect_scale(texstate,
                                                 uinfo->contents[i],
                                                 uinfo->data[i]));
                        break;

                case QUNIFORM_BLEND_CONST_COLOR:
                        cl_f(&vc4->uniforms,
                             vc4->blend_color.color[uinfo->data[i]]);
                        break;

                case QUNIFORM_STENCIL:
                        cl_u32(&vc4->uniforms,
                               vc4->zsa->stencil_uniforms[uinfo->data[i]] |
                               (uinfo->data[i] <= 1 ?
                                (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
                                0));
                        break;

                case QUNIFORM_ALPHA_REF:
                        cl_f(&vc4->uniforms, vc4->zsa->base.alpha.ref_value);
                        break;
                }
#if 0
                uint32_t written_val = *(uint32_t *)(vc4->uniforms.next - 4);
                fprintf(stderr, "%p: %d / 0x%08x (%f)\n",
                        shader, i, written_val, uif(written_val));
#endif
        }
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->prog.dirty |= VC4_SHADER_DIRTY_FP;
        vc4->dirty |= VC4_DIRTY_PROG;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->prog.dirty |= VC4_SHADER_DIRTY_VP;
        vc4->dirty |= VC4_DIRTY_PROG;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}