/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/format_srgb.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_lowering.h"

#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif
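/*
 * The key structs below capture the non-TGSI state that affects code
 * generation.  Compiled shader variants are cached in a hash table keyed
 * on these structs (see vc4_get_compiled_shader() further down).
 */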
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                unsigned compare_mode:1;
                unsigned compare_func:3;
                unsigned wrap_s:3;
                unsigned wrap_t:3;
                uint8_t swizzle[4];
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
        uint8_t ucp_enables;
};

struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};

struct vc4_vs_key {
        struct vc4_key base;

        /**
         * This is a proxy for the array of FS input semantics, which is
         * larger than we would want to put in the key.
         */
        uint64_t compiled_fs_id;

        enum pipe_format attr_formats[8];
        bool per_vertex_point_size;
};
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
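/*
 * Returns a QFILE_UNIF reference for the given contents/data pair, reusing
 * an existing uniform slot when an identical pair has already been emitted
 * for this shader.
 */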
static struct qreg
add_uniform(struct vc4_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return (struct qreg) { QFILE_UNIF, i };
                }
        }

        uint32_t uniform = c->num_uniforms++;
        struct qreg u = { QFILE_UNIF, uniform };

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return u;
}
static struct qreg
get_temp_for_uniform(struct vc4_compile *c, enum quniform_contents contents,
                     uint32_t data)
{
        struct qreg u = add_uniform(c, contents, data);
        struct qreg t = qir_MOV(c, u);
        return t;
}

static struct qreg
qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
{
        return get_temp_for_uniform(c, QUNIFORM_CONSTANT, ui);
}

static struct qreg
qir_uniform_f(struct vc4_compile *c, float f)
{
        return qir_uniform_ui(c, fui(f));
}
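/*
 * Loads an indirectly addressed uniform through the texture unit: the byte
 * offset is built from the ADDR register plus the declared array's base,
 * clamped so it can't run past the end of the array, and then fetched with
 * a direct TMU lookup from the UBO's address.
 */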
static struct qreg
indirect_uniform_load(struct vc4_compile *c,
                      struct tgsi_full_src_register *src, int swiz)
{
        struct tgsi_ind_register *indirect = &src->Indirect;
        struct vc4_compiler_ubo_range *range = &c->ubo_ranges[indirect->ArrayID];
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        assert(src->Register.Indirect);
        assert(indirect->File == TGSI_FILE_ADDRESS);

        struct qreg addr_val = c->addr[indirect->Swizzle];
        struct qreg indirect_offset =
                qir_ADD(c, addr_val, qir_uniform_ui(c,
                                                    range->dst_offset +
                                                    (src->Register.Index * 16)+
                                                    swiz * 4));
        indirect_offset = qir_MIN(c, indirect_offset, qir_uniform_ui(c, (range->dst_offset +
                                                                         range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, add_uniform(c, QUNIFORM_UBO_ADDR, 0));
        struct qreg r4 = qir_TEX_RESULT(c);
        c->num_texture_samples++;
        return qir_MOV(c, r4);
}
static struct qreg
get_src(struct vc4_compile *c, unsigned tgsi_op,
        struct tgsi_full_src_register *full_src, int i)
{
        struct tgsi_src_register *src = &full_src->Register;
        struct qreg r = c->undef;

        uint32_t s = i;
        switch (i) {
        case TGSI_SWIZZLE_X:
                s = src->SwizzleX;
                break;
        case TGSI_SWIZZLE_Y:
                s = src->SwizzleY;
                break;
        case TGSI_SWIZZLE_Z:
                s = src->SwizzleZ;
                break;
        case TGSI_SWIZZLE_W:
                s = src->SwizzleW;
                break;
        default:
                abort();
        }

        switch (src->File) {
        case TGSI_FILE_TEMPORARY:
                r = c->temps[src->Index * 4 + s];
                break;
        case TGSI_FILE_IMMEDIATE:
                r = c->consts[src->Index * 4 + s];
                break;
        case TGSI_FILE_CONSTANT:
                if (src->Indirect) {
                        r = indirect_uniform_load(c, full_src, s);
                } else {
                        r = get_temp_for_uniform(c, QUNIFORM_UNIFORM,
                                                 src->Index * 4 + s);
                }
                break;
        case TGSI_FILE_INPUT:
                r = c->inputs[src->Index * 4 + s];
                break;
        case TGSI_FILE_SAMPLER:
        case TGSI_FILE_SAMPLER_VIEW:
                break;
        default:
                fprintf(stderr, "unknown src file %d\n", src->File);
                abort();
        }

        if (src->Absolute)
                r = qir_FMAXABS(c, r, r);

        if (src->Negate) {
                switch (tgsi_opcode_infer_src_type(tgsi_op)) {
                case TGSI_TYPE_SIGNED:
                case TGSI_TYPE_UNSIGNED:
                        r = qir_SUB(c, qir_uniform_ui(c, 0), r);
                        break;
                default:
                        r = qir_FSUB(c, qir_uniform_f(c, 0.0), r);
                        break;
                }
        }

        return r;
}
static void
update_dst(struct vc4_compile *c, struct tgsi_full_instruction *tgsi_inst,
           int i, struct qreg val)
{
        struct tgsi_dst_register *tgsi_dst = &tgsi_inst->Dst[0].Register;

        assert(!tgsi_dst->Indirect);

        switch (tgsi_dst->File) {
        case TGSI_FILE_TEMPORARY:
                c->temps[tgsi_dst->Index * 4 + i] = val;
                break;
        case TGSI_FILE_OUTPUT:
                c->outputs[tgsi_dst->Index * 4 + i] = val;
                c->num_outputs = MAX2(c->num_outputs,
                                      tgsi_dst->Index * 4 + i + 1);
                break;
        case TGSI_FILE_ADDRESS:
                assert(tgsi_dst->Index == 0);
                c->addr[i] = val;
                break;
        default:
                fprintf(stderr, "unknown dst file %d\n", tgsi_dst->File);
                abort();
        }
}
static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
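/*
 * tgsi_to_qir_alu() and tgsi_to_qir_scalar() emit one generic QIR
 * instruction for a single channel of a TGSI op; opcodes that need more
 * than one instruction get dedicated emit helpers below.  The RCP/RSQ
 * helpers refine the hardware's estimate with one Newton-Raphson step
 * (r * (2 - x*r) and r * (1.5 - 0.5*x*r*r), respectively).
 */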
static struct qreg
tgsi_to_qir_alu(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        struct qreg dst = qir_get_temp(c);
        qir_emit(c, qir_inst4(op, dst,
                              src[0 * 4 + i],
                              src[1 * 4 + i],
                              src[2 * 4 + i],
                              src[3 * 4 + i]));
        return dst;
}

static struct qreg
tgsi_to_qir_scalar(struct vc4_compile *c,
                   struct tgsi_full_instruction *tgsi_inst,
                   enum qop op, struct qreg *src, int i)
{
        struct qreg dst = qir_get_temp(c);
        qir_emit(c, qir_inst(op, dst,
                             src[0 * 4 + 0],
                             c->undef));
        return dst;
}

static struct qreg
tgsi_to_qir_rcp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        struct qreg x = src[0 * 4 + 0];
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}

static struct qreg
tgsi_to_qir_rsq(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        struct qreg x = src[0 * 4 + 0];
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
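/*
 * Piecewise sRGB conversion helpers: decode evaluates srgb / 12.92 below
 * the 0.04045 cutoff and ((srgb + 0.055) / 1.055)^2.4 above it; encode
 * evaluates linear * 12.92 below 0.0031308 and 1.055 * linear^(1/2.4) -
 * 0.055 above it.  Both compute the two branches and use SF/SEL to pick
 * the right one.
 */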
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
}

static struct qreg
qir_srgb_encode(struct vc4_compile *c, struct qreg linear)
{
        struct qreg low = qir_FMUL(c, linear, qir_uniform_f(c, 12.92));
        struct qreg high = qir_FSUB(c,
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 1.055),
                                             qir_POW(c,
                                                     linear,
                                                     qir_uniform_f(c, 0.41666))),
                                    qir_uniform_f(c, 0.055));

        qir_SF(c, qir_FSUB(c, linear, qir_uniform_f(c, 0.0031308)));
        return qir_SEL_X_Y_NS(c, low, high);
}
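/*
 * 32-bit UMUL is assembled from 16-bit halves because the QPU multiplier
 * (MUL24) only produces correct products for 24-bit operands.  The low 32
 * bits of a*b are lolo + ((hilo + lohi) << 16); the hi*hi term only lands
 * above bit 32 and is dropped.
 */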
static struct qreg
tgsi_to_qir_umul(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        struct qreg src0_hi = qir_SHR(c, src[0 * 4 + i],
                                      qir_uniform_ui(c, 16));
        struct qreg src0_lo = qir_AND(c, src[0 * 4 + i],
                                      qir_uniform_ui(c, 0xffff));
        struct qreg src1_hi = qir_SHR(c, src[1 * 4 + i],
                                      qir_uniform_ui(c, 16));
        struct qreg src1_lo = qir_AND(c, src[1 * 4 + i],
                                      qir_uniform_ui(c, 0xffff));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1_lo);
        struct qreg lohi = qir_MUL24(c, src0_lo, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0_lo, src1_lo);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 16)));
}
static struct qreg
tgsi_to_qir_umad(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        return qir_ADD(c, tgsi_to_qir_umul(c, NULL, 0, src, i), src[2 * 4 + i]);
}

static struct qreg
tgsi_to_qir_idiv(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        return qir_FTOI(c, qir_FMUL(c,
                                    qir_ITOF(c, src[0 * 4 + i]),
                                    qir_RCP(c, qir_ITOF(c, src[1 * 4 + i]))));
}

static struct qreg
tgsi_to_qir_ineg(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        return qir_SUB(c, qir_uniform_ui(c, 0), src[0 * 4 + i]);
}
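/*
 * The SET/compare opcodes below all share one pattern: qir_SF() loads the
 * condition flags from a subtraction (or from the value itself), and a
 * qir_SEL_X_0_* then selects either the constant (1.0 for the float-result
 * SEQ/SNE/SLT/SGE, ~0 for the integer-result variants) or zero based on
 * the zero/negative flags.
 */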
static struct qreg
tgsi_to_qir_seq(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
}

static struct qreg
tgsi_to_qir_sne(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));
}

static struct qreg
tgsi_to_qir_slt(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));
}

static struct qreg
tgsi_to_qir_sge(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));
}

static struct qreg
tgsi_to_qir_fseq(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_fsne(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_fslt(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_fsge(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_useq(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_usne(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_islt(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
}

static struct qreg
tgsi_to_qir_isge(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, qir_SUB(c, src[0 * 4 + i], src[1 * 4 + i]));
        return qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
}
static struct qreg
tgsi_to_qir_cmp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        qir_SF(c, src[0 * 4 + i]);
        return qir_SEL_X_Y_NS(c,
                              src[1 * 4 + i],
                              src[2 * 4 + i]);
}

static struct qreg
tgsi_to_qir_ucmp(struct vc4_compile *c,
                 struct tgsi_full_instruction *tgsi_inst,
                 enum qop op, struct qreg *src, int i)
{
        qir_SF(c, src[0 * 4 + i]);
        return qir_SEL_X_Y_ZC(c,
                              src[1 * 4 + i],
                              src[2 * 4 + i]);
}

static struct qreg
tgsi_to_qir_mad(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        return qir_FADD(c,
                        qir_FMUL(c,
                                 src[0 * 4 + i],
                                 src[1 * 4 + i]),
                        src[2 * 4 + i]);
}

static struct qreg
tgsi_to_qir_lrp(struct vc4_compile *c,
                struct tgsi_full_instruction *tgsi_inst,
                enum qop op, struct qreg *src, int i)
{
        struct qreg src0 = src[0 * 4 + i];
        struct qreg src1 = src[1 * 4 + i];
        struct qreg src2 = src[2 * 4 + i];

        /* LRP is:
         *    src0 * src1 + (1 - src0) * src2.
         * -> src0 * src1 + src2 - src0 * src2
         * -> src2 + src0 * (src1 - src2)
         */
        return qir_FADD(c, src2, qir_FMUL(c, src0, qir_FSUB(c, src1, src2)));
}
629 tgsi_to_qir_tex(struct vc4_compile
*c
,
630 struct tgsi_full_instruction
*tgsi_inst
,
631 enum qop op
, struct qreg
*src
)
633 assert(!tgsi_inst
->Instruction
.Saturate
);
635 struct qreg s
= src
[0 * 4 + 0];
636 struct qreg t
= src
[0 * 4 + 1];
637 struct qreg r
= src
[0 * 4 + 2];
638 uint32_t unit
= tgsi_inst
->Src
[1].Register
.Index
;
639 bool is_txl
= tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
;
641 struct qreg proj
= c
->undef
;
642 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
) {
643 proj
= qir_RCP(c
, src
[0 * 4 + 3]);
644 s
= qir_FMUL(c
, s
, proj
);
645 t
= qir_FMUL(c
, t
, proj
);
648 struct qreg texture_u
[] = {
649 add_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P0
, unit
),
650 add_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P1
, unit
),
651 add_uniform(c
, QUNIFORM_CONSTANT
, 0),
652 add_uniform(c
, QUNIFORM_CONSTANT
, 0),
654 uint32_t next_texture_u
= 0;
656 /* There is no native support for GL texture rectangle coordinates, so
657 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
660 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_RECT
||
661 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWRECT
) {
663 get_temp_for_uniform(c
,
664 QUNIFORM_TEXRECT_SCALE_X
,
667 get_temp_for_uniform(c
,
668 QUNIFORM_TEXRECT_SCALE_Y
,
672 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE
||
673 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
||
675 texture_u
[2] = add_uniform(c
, QUNIFORM_TEXTURE_CONFIG_P2
,
676 unit
| (is_txl
<< 16));
679 if (tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_CUBE
||
680 tgsi_inst
->Texture
.Texture
== TGSI_TEXTURE_SHADOWCUBE
) {
681 struct qreg ma
= qir_FMAXABS(c
, qir_FMAXABS(c
, s
, t
), r
);
682 struct qreg rcp_ma
= qir_RCP(c
, ma
);
683 s
= qir_FMUL(c
, s
, rcp_ma
);
684 t
= qir_FMUL(c
, t
, rcp_ma
);
685 r
= qir_FMUL(c
, r
, rcp_ma
);
687 qir_TEX_R(c
, r
, texture_u
[next_texture_u
++]);
688 } else if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
689 c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
||
690 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP_TO_BORDER
||
691 c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
692 qir_TEX_R(c
, get_temp_for_uniform(c
, QUNIFORM_TEXTURE_BORDER_COLOR
, unit
),
693 texture_u
[next_texture_u
++]);
696 if (c
->key
->tex
[unit
].wrap_s
== PIPE_TEX_WRAP_CLAMP
) {
700 if (c
->key
->tex
[unit
].wrap_t
== PIPE_TEX_WRAP_CLAMP
) {
704 qir_TEX_T(c
, t
, texture_u
[next_texture_u
++]);
706 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXB
||
707 tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXL
)
708 qir_TEX_B(c
, src
[0 * 4 + 3], texture_u
[next_texture_u
++]);
710 qir_TEX_S(c
, s
, texture_u
[next_texture_u
++]);
712 c
->num_texture_samples
++;
713 struct qreg r4
= qir_TEX_RESULT(c
);
715 enum pipe_format format
= c
->key
->tex
[unit
].format
;
717 struct qreg unpacked
[4];
718 if (util_format_is_depth_or_stencil(format
)) {
719 struct qreg depthf
= qir_ITOF(c
, qir_SHR(c
, r4
,
720 qir_uniform_ui(c
, 8)));
721 struct qreg normalized
= qir_FMUL(c
, depthf
,
722 qir_uniform_f(c
, 1.0f
/0xffffff));
724 struct qreg depth_output
;
726 struct qreg one
= qir_uniform_f(c
, 1.0f
);
727 if (c
->key
->tex
[unit
].compare_mode
) {
728 struct qreg compare
= src
[0 * 4 + 2];
730 if (tgsi_inst
->Instruction
.Opcode
== TGSI_OPCODE_TXP
)
731 compare
= qir_FMUL(c
, compare
, proj
);
733 switch (c
->key
->tex
[unit
].compare_func
) {
734 case PIPE_FUNC_NEVER
:
735 depth_output
= qir_uniform_f(c
, 0.0f
);
737 case PIPE_FUNC_ALWAYS
:
740 case PIPE_FUNC_EQUAL
:
741 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
742 depth_output
= qir_SEL_X_0_ZS(c
, one
);
744 case PIPE_FUNC_NOTEQUAL
:
745 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
746 depth_output
= qir_SEL_X_0_ZC(c
, one
);
748 case PIPE_FUNC_GREATER
:
749 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
750 depth_output
= qir_SEL_X_0_NC(c
, one
);
752 case PIPE_FUNC_GEQUAL
:
753 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
754 depth_output
= qir_SEL_X_0_NS(c
, one
);
757 qir_SF(c
, qir_FSUB(c
, compare
, normalized
));
758 depth_output
= qir_SEL_X_0_NS(c
, one
);
760 case PIPE_FUNC_LEQUAL
:
761 qir_SF(c
, qir_FSUB(c
, normalized
, compare
));
762 depth_output
= qir_SEL_X_0_NC(c
, one
);
766 depth_output
= normalized
;
769 for (int i
= 0; i
< 4; i
++)
770 unpacked
[i
] = depth_output
;
772 for (int i
= 0; i
< 4; i
++)
773 unpacked
[i
] = qir_R4_UNPACK(c
, r4
, i
);
776 const uint8_t *format_swiz
= vc4_get_format_swizzle(format
);
777 struct qreg texture_output
[4];
778 for (int i
= 0; i
< 4; i
++) {
779 texture_output
[i
] = get_swizzled_channel(c
, unpacked
,
783 if (util_format_is_srgb(format
)) {
784 for (int i
= 0; i
< 3; i
++)
785 texture_output
[i
] = qir_srgb_decode(c
,
789 for (int i
= 0; i
< 4; i
++) {
790 if (!(tgsi_inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
793 update_dst(c
, tgsi_inst
, i
,
794 get_swizzled_channel(c
, texture_output
,
795 c
->key
->tex
[unit
].swizzle
[i
]));
800 tgsi_to_qir_trunc(struct vc4_compile
*c
,
801 struct tgsi_full_instruction
*tgsi_inst
,
802 enum qop op
, struct qreg
*src
, int i
)
804 return qir_ITOF(c
, qir_FTOI(c
, src
[0 * 4 + i
]));
808 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
812 tgsi_to_qir_frc(struct vc4_compile
*c
,
813 struct tgsi_full_instruction
*tgsi_inst
,
814 enum qop op
, struct qreg
*src
, int i
)
816 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
[0 * 4 + i
]));
817 struct qreg diff
= qir_FSUB(c
, src
[0 * 4 + i
], trunc
);
819 return qir_SEL_X_Y_NS(c
,
820 qir_FADD(c
, diff
, qir_uniform_f(c
, 1.0)),
825 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
829 tgsi_to_qir_flr(struct vc4_compile
*c
,
830 struct tgsi_full_instruction
*tgsi_inst
,
831 enum qop op
, struct qreg
*src
, int i
)
833 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
[0 * 4 + i
]));
835 /* This will be < 0 if we truncated and the truncation was of a value
836 * that was < 0 in the first place.
838 qir_SF(c
, qir_FSUB(c
, src
[0 * 4 + i
], trunc
));
840 return qir_SEL_X_Y_NS(c
,
841 qir_FSUB(c
, trunc
, qir_uniform_f(c
, 1.0)),
846 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
850 tgsi_to_qir_ceil(struct vc4_compile
*c
,
851 struct tgsi_full_instruction
*tgsi_inst
,
852 enum qop op
, struct qreg
*src
, int i
)
854 struct qreg trunc
= qir_ITOF(c
, qir_FTOI(c
, src
[0 * 4 + i
]));
856 /* This will be < 0 if we truncated and the truncation was of a value
857 * that was > 0 in the first place.
859 qir_SF(c
, qir_FSUB(c
, trunc
, src
[0 * 4 + i
]));
861 return qir_SEL_X_Y_NS(c
,
862 qir_FADD(c
, trunc
, qir_uniform_f(c
, 1.0)),
867 tgsi_to_qir_abs(struct vc4_compile
*c
,
868 struct tgsi_full_instruction
*tgsi_inst
,
869 enum qop op
, struct qreg
*src
, int i
)
871 struct qreg arg
= src
[0 * 4 + i
];
872 return qir_FMAXABS(c
, arg
, arg
);
875 /* Note that this instruction replicates its result from the x channel */
877 tgsi_to_qir_sin(struct vc4_compile
*c
,
878 struct tgsi_full_instruction
*tgsi_inst
,
879 enum qop op
, struct qreg
*src
, int i
)
883 pow(2.0 * M_PI
, 3) / (3 * 2 * 1),
884 -pow(2.0 * M_PI
, 5) / (5 * 4 * 3 * 2 * 1),
885 pow(2.0 * M_PI
, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
886 -pow(2.0 * M_PI
, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
889 struct qreg scaled_x
=
892 qir_uniform_f(c
, 1.0f
/ (M_PI
* 2.0f
)));
894 struct qreg x
= qir_FADD(c
,
895 tgsi_to_qir_frc(c
, NULL
, 0, &scaled_x
, 0),
896 qir_uniform_f(c
, -0.5));
897 struct qreg x2
= qir_FMUL(c
, x
, x
);
898 struct qreg sum
= qir_FMUL(c
, x
, qir_uniform_f(c
, coeff
[0]));
899 for (int i
= 1; i
< ARRAY_SIZE(coeff
); i
++) {
900 x
= qir_FMUL(c
, x
, x2
);
905 qir_uniform_f(c
, coeff
[i
])));
910 /* Note that this instruction replicates its result from the x channel */
912 tgsi_to_qir_cos(struct vc4_compile
*c
,
913 struct tgsi_full_instruction
*tgsi_inst
,
914 enum qop op
, struct qreg
*src
, int i
)
918 pow(2.0 * M_PI
, 2) / (2 * 1),
919 -pow(2.0 * M_PI
, 4) / (4 * 3 * 2 * 1),
920 pow(2.0 * M_PI
, 6) / (6 * 5 * 4 * 3 * 2 * 1),
921 -pow(2.0 * M_PI
, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
922 pow(2.0 * M_PI
, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
925 struct qreg scaled_x
=
926 qir_FMUL(c
, src
[0 * 4 + 0],
927 qir_uniform_f(c
, 1.0f
/ (M_PI
* 2.0f
)));
928 struct qreg x_frac
= qir_FADD(c
,
929 tgsi_to_qir_frc(c
, NULL
, 0, &scaled_x
, 0),
930 qir_uniform_f(c
, -0.5));
932 struct qreg sum
= qir_uniform_f(c
, coeff
[0]);
933 struct qreg x2
= qir_FMUL(c
, x_frac
, x_frac
);
934 struct qreg x
= x2
; /* Current x^2, x^4, or x^6 */
935 for (int i
= 1; i
< ARRAY_SIZE(coeff
); i
++) {
937 x
= qir_FMUL(c
, x
, x2
);
939 struct qreg mul
= qir_FMUL(c
,
941 qir_uniform_f(c
, coeff
[i
]));
945 sum
= qir_FADD(c
, sum
, mul
);
951 tgsi_to_qir_clamp(struct vc4_compile
*c
,
952 struct tgsi_full_instruction
*tgsi_inst
,
953 enum qop op
, struct qreg
*src
, int i
)
955 return qir_FMAX(c
, qir_FMIN(c
,
962 tgsi_to_qir_ssg(struct vc4_compile
*c
,
963 struct tgsi_full_instruction
*tgsi_inst
,
964 enum qop op
, struct qreg
*src
, int i
)
966 qir_SF(c
, src
[0 * 4 + i
]);
967 return qir_SEL_X_Y_NC(c
,
968 qir_SEL_X_0_ZC(c
, qir_uniform_f(c
, 1.0)),
969 qir_uniform_f(c
, -1.0));
972 /* Compare to tgsi_to_qir_flr() for the floor logic. */
974 tgsi_to_qir_arl(struct vc4_compile
*c
,
975 struct tgsi_full_instruction
*tgsi_inst
,
976 enum qop op
, struct qreg
*src
, int i
)
978 struct qreg trunc
= qir_FTOI(c
, src
[0 * 4 + i
]);
979 struct qreg scaled
= qir_SHL(c
, trunc
, qir_uniform_ui(c
, 4));
981 qir_SF(c
, qir_FSUB(c
, src
[0 * 4 + i
], qir_ITOF(c
, trunc
)));
983 return qir_SEL_X_Y_NS(c
, qir_SUB(c
, scaled
, qir_uniform_ui(c
, 4)),
988 tgsi_to_qir_uarl(struct vc4_compile
*c
,
989 struct tgsi_full_instruction
*tgsi_inst
,
990 enum qop op
, struct qreg
*src
, int i
)
992 return qir_SHL(c
, src
[0 * 4 + i
], qir_uniform_ui(c
, 4));
996 get_channel_from_vpm(struct vc4_compile
*c
,
997 struct qreg
*vpm_reads
,
999 const struct util_format_description
*desc
)
1001 const struct util_format_channel_description
*chan
=
1002 &desc
->channel
[swiz
];
1005 if (swiz
> UTIL_FORMAT_SWIZZLE_W
)
1006 return get_swizzled_channel(c
, vpm_reads
, swiz
);
1007 else if (chan
->size
== 32 &&
1008 chan
->type
== UTIL_FORMAT_TYPE_FLOAT
) {
1009 return get_swizzled_channel(c
, vpm_reads
, swiz
);
1010 } else if (chan
->size
== 32 &&
1011 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
1012 if (chan
->normalized
) {
1014 qir_ITOF(c
, vpm_reads
[swiz
]),
1018 return qir_ITOF(c
, vpm_reads
[swiz
]);
1020 } else if (chan
->size
== 8 &&
1021 (chan
->type
== UTIL_FORMAT_TYPE_UNSIGNED
||
1022 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
)) {
1023 struct qreg vpm
= vpm_reads
[0];
1024 if (chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
1025 temp
= qir_XOR(c
, vpm
, qir_uniform_ui(c
, 0x80808080));
1026 if (chan
->normalized
) {
1027 return qir_FSUB(c
, qir_FMUL(c
,
1028 qir_UNPACK_8_F(c
, temp
, swiz
),
1029 qir_uniform_f(c
, 2.0)),
1030 qir_uniform_f(c
, 1.0));
1034 qir_UNPACK_8_I(c
, temp
,
1036 qir_uniform_f(c
, -128.0));
1039 if (chan
->normalized
) {
1040 return qir_UNPACK_8_F(c
, vpm
, swiz
);
1042 return qir_ITOF(c
, qir_UNPACK_8_I(c
, vpm
, swiz
));
1045 } else if (chan
->size
== 16 &&
1046 (chan
->type
== UTIL_FORMAT_TYPE_UNSIGNED
||
1047 chan
->type
== UTIL_FORMAT_TYPE_SIGNED
)) {
1048 struct qreg vpm
= vpm_reads
[swiz
/ 2];
1050 /* Note that UNPACK_16F eats a half float, not ints, so we use
1051 * UNPACK_16_I for all of these.
1053 if (chan
->type
== UTIL_FORMAT_TYPE_SIGNED
) {
1054 temp
= qir_ITOF(c
, qir_UNPACK_16_I(c
, vpm
, swiz
% 2));
1055 if (chan
->normalized
) {
1056 return qir_FMUL(c
, temp
,
1057 qir_uniform_f(c
, 1/32768.0f
));
1062 /* UNPACK_16I sign-extends, so we have to emit ANDs. */
1064 if (swiz
== 1 || swiz
== 3)
1065 temp
= qir_UNPACK_16_I(c
, temp
, 1);
1066 temp
= qir_AND(c
, temp
, qir_uniform_ui(c
, 0xffff));
1067 temp
= qir_ITOF(c
, temp
);
1069 if (chan
->normalized
) {
1070 return qir_FMUL(c
, temp
,
1071 qir_uniform_f(c
, 1 / 65535.0));
1082 emit_vertex_input(struct vc4_compile
*c
, int attr
)
1084 enum pipe_format format
= c
->vs_key
->attr_formats
[attr
];
1085 uint32_t attr_size
= util_format_get_blocksize(format
);
1086 struct qreg vpm_reads
[4];
1088 c
->vattr_sizes
[attr
] = align(attr_size
, 4);
1089 for (int i
= 0; i
< align(attr_size
, 4) / 4; i
++) {
1090 struct qreg vpm
= { QFILE_VPM
, attr
* 4 + i
};
1091 vpm_reads
[i
] = qir_MOV(c
, vpm
);
1095 bool format_warned
= false;
1096 const struct util_format_description
*desc
=
1097 util_format_description(format
);
1099 for (int i
= 0; i
< 4; i
++) {
1100 uint8_t swiz
= desc
->swizzle
[i
];
1101 struct qreg result
= get_channel_from_vpm(c
, vpm_reads
,
1104 if (result
.file
== QFILE_NULL
) {
1105 if (!format_warned
) {
1107 "vtx element %d unsupported type: %s\n",
1108 attr
, util_format_name(format
));
1109 format_warned
= true;
1111 result
= qir_uniform_f(c
, 0.0);
1113 c
->inputs
[attr
* 4 + i
] = result
;
1118 tgsi_to_qir_kill_if(struct vc4_compile
*c
, struct qreg
*src
, int i
)
1120 if (c
->discard
.file
== QFILE_NULL
)
1121 c
->discard
= qir_uniform_f(c
, 0.0);
1122 qir_SF(c
, src
[0 * 4 + i
]);
1123 c
->discard
= qir_SEL_X_Y_NS(c
, qir_uniform_f(c
, 1.0),
1128 emit_fragcoord_input(struct vc4_compile
*c
, int attr
)
1130 c
->inputs
[attr
* 4 + 0] = qir_FRAG_X(c
);
1131 c
->inputs
[attr
* 4 + 1] = qir_FRAG_Y(c
);
1132 c
->inputs
[attr
* 4 + 2] =
1134 qir_ITOF(c
, qir_FRAG_Z(c
)),
1135 qir_uniform_f(c
, 1.0 / 0xffffff));
1136 c
->inputs
[attr
* 4 + 3] = qir_RCP(c
, qir_FRAG_W(c
));
1140 emit_point_coord_input(struct vc4_compile
*c
, int attr
)
1142 if (c
->point_x
.file
== QFILE_NULL
) {
1143 c
->point_x
= qir_uniform_f(c
, 0.0);
1144 c
->point_y
= qir_uniform_f(c
, 0.0);
1147 c
->inputs
[attr
* 4 + 0] = c
->point_x
;
1148 if (c
->fs_key
->point_coord_upper_left
) {
1149 c
->inputs
[attr
* 4 + 1] = qir_FSUB(c
,
1150 qir_uniform_f(c
, 1.0),
1153 c
->inputs
[attr
* 4 + 1] = c
->point_y
;
1155 c
->inputs
[attr
* 4 + 2] = qir_uniform_f(c
, 0.0);
1156 c
->inputs
[attr
* 4 + 3] = qir_uniform_f(c
, 1.0);
1160 emit_fragment_varying(struct vc4_compile
*c
, uint8_t semantic
,
1161 uint8_t index
, uint8_t swizzle
)
1163 uint32_t i
= c
->num_input_semantics
++;
1164 struct qreg vary
= {
1169 if (c
->num_input_semantics
>= c
->input_semantics_array_size
) {
1170 c
->input_semantics_array_size
=
1171 MAX2(4, c
->input_semantics_array_size
* 2);
1173 c
->input_semantics
= reralloc(c
, c
->input_semantics
,
1174 struct vc4_varying_semantic
,
1175 c
->input_semantics_array_size
);
1178 c
->input_semantics
[i
].semantic
= semantic
;
1179 c
->input_semantics
[i
].index
= index
;
1180 c
->input_semantics
[i
].swizzle
= swizzle
;
1182 return qir_VARY_ADD_C(c
, qir_FMUL(c
, vary
, qir_FRAG_W(c
)));
1186 emit_fragment_input(struct vc4_compile
*c
, int attr
,
1187 struct tgsi_full_declaration
*decl
)
1189 for (int i
= 0; i
< 4; i
++) {
1190 c
->inputs
[attr
* 4 + i
] =
1191 emit_fragment_varying(c
,
1192 decl
->Semantic
.Name
,
1193 decl
->Semantic
.Index
,
1200 emit_face_input(struct vc4_compile
*c
, int attr
)
1202 c
->inputs
[attr
* 4 + 0] = qir_FSUB(c
,
1203 qir_uniform_f(c
, 1.0),
1205 qir_ITOF(c
, qir_FRAG_REV_FLAG(c
)),
1206 qir_uniform_f(c
, 2.0)));
1207 c
->inputs
[attr
* 4 + 1] = qir_uniform_f(c
, 0.0);
1208 c
->inputs
[attr
* 4 + 2] = qir_uniform_f(c
, 0.0);
1209 c
->inputs
[attr
* 4 + 3] = qir_uniform_f(c
, 1.0);
1213 add_output(struct vc4_compile
*c
,
1214 uint32_t decl_offset
,
1215 uint8_t semantic_name
,
1216 uint8_t semantic_index
,
1217 uint8_t semantic_swizzle
)
1219 uint32_t old_array_size
= c
->outputs_array_size
;
1220 resize_qreg_array(c
, &c
->outputs
, &c
->outputs_array_size
,
1223 if (old_array_size
!= c
->outputs_array_size
) {
1224 c
->output_semantics
= reralloc(c
,
1225 c
->output_semantics
,
1226 struct vc4_varying_semantic
,
1227 c
->outputs_array_size
);
1230 c
->output_semantics
[decl_offset
].semantic
= semantic_name
;
1231 c
->output_semantics
[decl_offset
].index
= semantic_index
;
1232 c
->output_semantics
[decl_offset
].swizzle
= semantic_swizzle
;
1236 add_array_info(struct vc4_compile
*c
, uint32_t array_id
,
1237 uint32_t start
, uint32_t size
)
1239 if (array_id
>= c
->ubo_ranges_array_size
) {
1240 c
->ubo_ranges_array_size
= MAX2(c
->ubo_ranges_array_size
* 2,
1242 c
->ubo_ranges
= reralloc(c
, c
->ubo_ranges
,
1243 struct vc4_compiler_ubo_range
,
1244 c
->ubo_ranges_array_size
);
1247 c
->ubo_ranges
[array_id
].dst_offset
= 0;
1248 c
->ubo_ranges
[array_id
].src_offset
= start
;
1249 c
->ubo_ranges
[array_id
].size
= size
;
1250 c
->ubo_ranges
[array_id
].used
= false;
1254 emit_tgsi_declaration(struct vc4_compile
*c
,
1255 struct tgsi_full_declaration
*decl
)
1257 switch (decl
->Declaration
.File
) {
1258 case TGSI_FILE_TEMPORARY
: {
1259 uint32_t old_size
= c
->temps_array_size
;
1260 resize_qreg_array(c
, &c
->temps
, &c
->temps_array_size
,
1261 (decl
->Range
.Last
+ 1) * 4);
1263 for (int i
= old_size
; i
< c
->temps_array_size
; i
++)
1264 c
->temps
[i
] = qir_uniform_ui(c
, 0);
1268 case TGSI_FILE_INPUT
:
1269 resize_qreg_array(c
, &c
->inputs
, &c
->inputs_array_size
,
1270 (decl
->Range
.Last
+ 1) * 4);
1272 for (int i
= decl
->Range
.First
;
1273 i
<= decl
->Range
.Last
;
1275 if (c
->stage
== QSTAGE_FRAG
) {
1276 if (decl
->Semantic
.Name
==
1277 TGSI_SEMANTIC_POSITION
) {
1278 emit_fragcoord_input(c
, i
);
1279 } else if (decl
->Semantic
.Name
== TGSI_SEMANTIC_FACE
) {
1280 emit_face_input(c
, i
);
1281 } else if (decl
->Semantic
.Name
== TGSI_SEMANTIC_GENERIC
&&
1282 (c
->fs_key
->point_sprite_mask
&
1283 (1 << decl
->Semantic
.Index
))) {
1284 emit_point_coord_input(c
, i
);
1286 emit_fragment_input(c
, i
, decl
);
1289 emit_vertex_input(c
, i
);
1294 case TGSI_FILE_OUTPUT
: {
1295 for (int i
= 0; i
< 4; i
++) {
1297 decl
->Range
.First
* 4 + i
,
1298 decl
->Semantic
.Name
,
1299 decl
->Semantic
.Index
,
1303 switch (decl
->Semantic
.Name
) {
1304 case TGSI_SEMANTIC_POSITION
:
1305 c
->output_position_index
= decl
->Range
.First
* 4;
1307 case TGSI_SEMANTIC_CLIPVERTEX
:
1308 c
->output_clipvertex_index
= decl
->Range
.First
* 4;
1310 case TGSI_SEMANTIC_COLOR
:
1311 c
->output_color_index
= decl
->Range
.First
* 4;
1313 case TGSI_SEMANTIC_PSIZE
:
1314 c
->output_point_size_index
= decl
->Range
.First
* 4;
1320 case TGSI_FILE_CONSTANT
:
1322 decl
->Array
.ArrayID
,
1323 decl
->Range
.First
* 16,
1325 decl
->Range
.First
+ 1) * 16);
1332 emit_tgsi_instruction(struct vc4_compile
*c
,
1333 struct tgsi_full_instruction
*tgsi_inst
)
1335 static const struct {
1337 struct qreg (*func
)(struct vc4_compile
*c
,
1338 struct tgsi_full_instruction
*tgsi_inst
,
1340 struct qreg
*src
, int i
);
1342 [TGSI_OPCODE_MOV
] = { QOP_MOV
, tgsi_to_qir_alu
},
1343 [TGSI_OPCODE_ABS
] = { 0, tgsi_to_qir_abs
},
1344 [TGSI_OPCODE_MUL
] = { QOP_FMUL
, tgsi_to_qir_alu
},
1345 [TGSI_OPCODE_ADD
] = { QOP_FADD
, tgsi_to_qir_alu
},
1346 [TGSI_OPCODE_SUB
] = { QOP_FSUB
, tgsi_to_qir_alu
},
1347 [TGSI_OPCODE_MIN
] = { QOP_FMIN
, tgsi_to_qir_alu
},
1348 [TGSI_OPCODE_MAX
] = { QOP_FMAX
, tgsi_to_qir_alu
},
1349 [TGSI_OPCODE_F2I
] = { QOP_FTOI
, tgsi_to_qir_alu
},
1350 [TGSI_OPCODE_I2F
] = { QOP_ITOF
, tgsi_to_qir_alu
},
1351 [TGSI_OPCODE_UADD
] = { QOP_ADD
, tgsi_to_qir_alu
},
1352 [TGSI_OPCODE_USHR
] = { QOP_SHR
, tgsi_to_qir_alu
},
1353 [TGSI_OPCODE_ISHR
] = { QOP_ASR
, tgsi_to_qir_alu
},
1354 [TGSI_OPCODE_SHL
] = { QOP_SHL
, tgsi_to_qir_alu
},
1355 [TGSI_OPCODE_IMIN
] = { QOP_MIN
, tgsi_to_qir_alu
},
1356 [TGSI_OPCODE_IMAX
] = { QOP_MAX
, tgsi_to_qir_alu
},
1357 [TGSI_OPCODE_AND
] = { QOP_AND
, tgsi_to_qir_alu
},
1358 [TGSI_OPCODE_OR
] = { QOP_OR
, tgsi_to_qir_alu
},
1359 [TGSI_OPCODE_XOR
] = { QOP_XOR
, tgsi_to_qir_alu
},
1360 [TGSI_OPCODE_NOT
] = { QOP_NOT
, tgsi_to_qir_alu
},
1362 [TGSI_OPCODE_UMUL
] = { 0, tgsi_to_qir_umul
},
1363 [TGSI_OPCODE_UMAD
] = { 0, tgsi_to_qir_umad
},
1364 [TGSI_OPCODE_IDIV
] = { 0, tgsi_to_qir_idiv
},
1365 [TGSI_OPCODE_INEG
] = { 0, tgsi_to_qir_ineg
},
1367 [TGSI_OPCODE_SEQ
] = { 0, tgsi_to_qir_seq
},
1368 [TGSI_OPCODE_SNE
] = { 0, tgsi_to_qir_sne
},
1369 [TGSI_OPCODE_SGE
] = { 0, tgsi_to_qir_sge
},
1370 [TGSI_OPCODE_SLT
] = { 0, tgsi_to_qir_slt
},
1371 [TGSI_OPCODE_FSEQ
] = { 0, tgsi_to_qir_fseq
},
1372 [TGSI_OPCODE_FSNE
] = { 0, tgsi_to_qir_fsne
},
1373 [TGSI_OPCODE_FSGE
] = { 0, tgsi_to_qir_fsge
},
1374 [TGSI_OPCODE_FSLT
] = { 0, tgsi_to_qir_fslt
},
1375 [TGSI_OPCODE_USEQ
] = { 0, tgsi_to_qir_useq
},
1376 [TGSI_OPCODE_USNE
] = { 0, tgsi_to_qir_usne
},
1377 [TGSI_OPCODE_ISGE
] = { 0, tgsi_to_qir_isge
},
1378 [TGSI_OPCODE_ISLT
] = { 0, tgsi_to_qir_islt
},
1380 [TGSI_OPCODE_CMP
] = { 0, tgsi_to_qir_cmp
},
1381 [TGSI_OPCODE_UCMP
] = { 0, tgsi_to_qir_ucmp
},
1382 [TGSI_OPCODE_MAD
] = { 0, tgsi_to_qir_mad
},
1383 [TGSI_OPCODE_RCP
] = { QOP_RCP
, tgsi_to_qir_rcp
},
1384 [TGSI_OPCODE_RSQ
] = { QOP_RSQ
, tgsi_to_qir_rsq
},
1385 [TGSI_OPCODE_EX2
] = { QOP_EXP2
, tgsi_to_qir_scalar
},
1386 [TGSI_OPCODE_LG2
] = { QOP_LOG2
, tgsi_to_qir_scalar
},
1387 [TGSI_OPCODE_LRP
] = { 0, tgsi_to_qir_lrp
},
1388 [TGSI_OPCODE_TRUNC
] = { 0, tgsi_to_qir_trunc
},
1389 [TGSI_OPCODE_CEIL
] = { 0, tgsi_to_qir_ceil
},
1390 [TGSI_OPCODE_FRC
] = { 0, tgsi_to_qir_frc
},
1391 [TGSI_OPCODE_FLR
] = { 0, tgsi_to_qir_flr
},
1392 [TGSI_OPCODE_SIN
] = { 0, tgsi_to_qir_sin
},
1393 [TGSI_OPCODE_COS
] = { 0, tgsi_to_qir_cos
},
1394 [TGSI_OPCODE_CLAMP
] = { 0, tgsi_to_qir_clamp
},
1395 [TGSI_OPCODE_SSG
] = { 0, tgsi_to_qir_ssg
},
1396 [TGSI_OPCODE_ARL
] = { 0, tgsi_to_qir_arl
},
1397 [TGSI_OPCODE_UARL
] = { 0, tgsi_to_qir_uarl
},
1399 static int asdf
= 0;
1400 uint32_t tgsi_op
= tgsi_inst
->Instruction
.Opcode
;
1402 if (tgsi_op
== TGSI_OPCODE_END
)
1405 struct qreg src_regs
[12];
1406 for (int s
= 0; s
< 3; s
++) {
1407 for (int i
= 0; i
< 4; i
++) {
1408 src_regs
[4 * s
+ i
] =
1409 get_src(c
, tgsi_inst
->Instruction
.Opcode
,
1410 &tgsi_inst
->Src
[s
], i
);
1415 case TGSI_OPCODE_TEX
:
1416 case TGSI_OPCODE_TXP
:
1417 case TGSI_OPCODE_TXB
:
1418 case TGSI_OPCODE_TXL
:
1419 tgsi_to_qir_tex(c
, tgsi_inst
,
1420 op_trans
[tgsi_op
].op
, src_regs
);
1422 case TGSI_OPCODE_KILL
:
1423 c
->discard
= qir_uniform_f(c
, 1.0);
1425 case TGSI_OPCODE_KILL_IF
:
1426 for (int i
= 0; i
< 4; i
++)
1427 tgsi_to_qir_kill_if(c
, src_regs
, i
);
1433 if (tgsi_op
> ARRAY_SIZE(op_trans
) || !(op_trans
[tgsi_op
].func
)) {
1434 fprintf(stderr
, "unknown tgsi inst: ");
1435 tgsi_dump_instruction(tgsi_inst
, asdf
++);
1436 fprintf(stderr
, "\n");
1440 for (int i
= 0; i
< 4; i
++) {
1441 if (!(tgsi_inst
->Dst
[0].Register
.WriteMask
& (1 << i
)))
1446 result
= op_trans
[tgsi_op
].func(c
, tgsi_inst
,
1447 op_trans
[tgsi_op
].op
,
1450 if (tgsi_inst
->Instruction
.Saturate
) {
1451 float low
= (tgsi_inst
->Instruction
.Saturate
==
1452 TGSI_SAT_MINUS_PLUS_ONE
? -1.0 : 0.0);
1453 result
= qir_FMAX(c
,
1456 qir_uniform_f(c
, 1.0)),
1457 qir_uniform_f(c
, low
));
1460 update_dst(c
, tgsi_inst
, i
, result
);
1465 parse_tgsi_immediate(struct vc4_compile
*c
, struct tgsi_full_immediate
*imm
)
1467 for (int i
= 0; i
< 4; i
++) {
1468 unsigned n
= c
->num_consts
++;
1469 resize_qreg_array(c
, &c
->consts
, &c
->consts_array_size
, n
+ 1);
1470 c
->consts
[n
] = qir_uniform_ui(c
, imm
->u
[i
].Uint
);
1475 vc4_blend_channel(struct vc4_compile
*c
,
1483 case PIPE_BLENDFACTOR_ONE
:
1485 case PIPE_BLENDFACTOR_SRC_COLOR
:
1486 return qir_FMUL(c
, val
, src
[channel
]);
1487 case PIPE_BLENDFACTOR_SRC_ALPHA
:
1488 return qir_FMUL(c
, val
, src
[3]);
1489 case PIPE_BLENDFACTOR_DST_ALPHA
:
1490 return qir_FMUL(c
, val
, dst
[3]);
1491 case PIPE_BLENDFACTOR_DST_COLOR
:
1492 return qir_FMUL(c
, val
, dst
[channel
]);
1493 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE
:
1500 qir_uniform_f(c
, 1.0),
1505 case PIPE_BLENDFACTOR_CONST_COLOR
:
1506 return qir_FMUL(c
, val
,
1507 get_temp_for_uniform(c
,
1508 QUNIFORM_BLEND_CONST_COLOR
,
1510 case PIPE_BLENDFACTOR_CONST_ALPHA
:
1511 return qir_FMUL(c
, val
,
1512 get_temp_for_uniform(c
,
1513 QUNIFORM_BLEND_CONST_COLOR
,
1515 case PIPE_BLENDFACTOR_ZERO
:
1516 return qir_uniform_f(c
, 0.0);
1517 case PIPE_BLENDFACTOR_INV_SRC_COLOR
:
1518 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1520 case PIPE_BLENDFACTOR_INV_SRC_ALPHA
:
1521 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1523 case PIPE_BLENDFACTOR_INV_DST_ALPHA
:
1524 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1526 case PIPE_BLENDFACTOR_INV_DST_COLOR
:
1527 return qir_FMUL(c
, val
, qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1529 case PIPE_BLENDFACTOR_INV_CONST_COLOR
:
1530 return qir_FMUL(c
, val
,
1531 qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1532 get_temp_for_uniform(c
,
1533 QUNIFORM_BLEND_CONST_COLOR
,
1535 case PIPE_BLENDFACTOR_INV_CONST_ALPHA
:
1536 return qir_FMUL(c
, val
,
1537 qir_FSUB(c
, qir_uniform_f(c
, 1.0),
1538 get_temp_for_uniform(c
,
1539 QUNIFORM_BLEND_CONST_COLOR
,
1543 case PIPE_BLENDFACTOR_SRC1_COLOR
:
1544 case PIPE_BLENDFACTOR_SRC1_ALPHA
:
1545 case PIPE_BLENDFACTOR_INV_SRC1_COLOR
:
1546 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA
:
1548 fprintf(stderr
, "Unknown blend factor %d\n", factor
);
1554 vc4_blend_func(struct vc4_compile
*c
,
1555 struct qreg src
, struct qreg dst
,
1559 case PIPE_BLEND_ADD
:
1560 return qir_FADD(c
, src
, dst
);
1561 case PIPE_BLEND_SUBTRACT
:
1562 return qir_FSUB(c
, src
, dst
);
1563 case PIPE_BLEND_REVERSE_SUBTRACT
:
1564 return qir_FSUB(c
, dst
, src
);
1565 case PIPE_BLEND_MIN
:
1566 return qir_FMIN(c
, src
, dst
);
1567 case PIPE_BLEND_MAX
:
1568 return qir_FMAX(c
, src
, dst
);
1572 fprintf(stderr
, "Unknown blend func %d\n", func
);
1579 * Implements fixed function blending in shader code.
1581 * VC4 doesn't have any hardware support for blending. Instead, you read the
1582 * current contents of the destination from the tile buffer after having
1583 * waited for the scoreboard (which is handled by vc4_qpu_emit.c), then do
1584 * math using your output color and that destination value, and update the
1585 * output color appropriately.
1588 vc4_blend(struct vc4_compile
*c
, struct qreg
*result
,
1589 struct qreg
*dst_color
, struct qreg
*src_color
)
1591 struct pipe_rt_blend_state
*blend
= &c
->fs_key
->blend
;
1593 if (!blend
->blend_enable
) {
1594 for (int i
= 0; i
< 4; i
++)
1595 result
[i
] = src_color
[i
];
1599 struct qreg clamped_src
[4];
1600 struct qreg clamped_dst
[4];
1601 for (int i
= 0; i
< 4; i
++) {
1602 clamped_src
[i
] = qir_SAT(c
, src_color
[i
]);
1603 clamped_dst
[i
] = qir_SAT(c
, dst_color
[i
]);
1605 src_color
= clamped_src
;
1606 dst_color
= clamped_dst
;
1608 struct qreg src_blend
[4], dst_blend
[4];
1609 for (int i
= 0; i
< 3; i
++) {
1610 src_blend
[i
] = vc4_blend_channel(c
,
1611 dst_color
, src_color
,
1613 blend
->rgb_src_factor
, i
);
1614 dst_blend
[i
] = vc4_blend_channel(c
,
1615 dst_color
, src_color
,
1617 blend
->rgb_dst_factor
, i
);
1619 src_blend
[3] = vc4_blend_channel(c
,
1620 dst_color
, src_color
,
1622 blend
->alpha_src_factor
, 3);
1623 dst_blend
[3] = vc4_blend_channel(c
,
1624 dst_color
, src_color
,
1626 blend
->alpha_dst_factor
, 3);
1628 for (int i
= 0; i
< 3; i
++) {
1629 result
[i
] = vc4_blend_func(c
,
1630 src_blend
[i
], dst_blend
[i
],
1633 result
[3] = vc4_blend_func(c
,
1634 src_blend
[3], dst_blend
[3],
1639 clip_distance_discard(struct vc4_compile
*c
)
1641 for (int i
= 0; i
< PIPE_MAX_CLIP_PLANES
; i
++) {
1642 if (!(c
->key
->ucp_enables
& (1 << i
)))
1645 struct qreg dist
= emit_fragment_varying(c
,
1646 TGSI_SEMANTIC_CLIPDIST
,
1652 if (c
->discard
.file
== QFILE_NULL
)
1653 c
->discard
= qir_uniform_f(c
, 0.0);
1655 c
->discard
= qir_SEL_X_Y_NS(c
, qir_uniform_f(c
, 1.0),
1661 alpha_test_discard(struct vc4_compile
*c
)
1663 struct qreg src_alpha
;
1664 struct qreg alpha_ref
= get_temp_for_uniform(c
, QUNIFORM_ALPHA_REF
, 0);
1666 if (!c
->fs_key
->alpha_test
)
1669 if (c
->output_color_index
!= -1)
1670 src_alpha
= c
->outputs
[c
->output_color_index
+ 3];
1672 src_alpha
= qir_uniform_f(c
, 1.0);
1674 if (c
->discard
.file
== QFILE_NULL
)
1675 c
->discard
= qir_uniform_f(c
, 0.0);
1677 switch (c
->fs_key
->alpha_test_func
) {
1678 case PIPE_FUNC_NEVER
:
1679 c
->discard
= qir_uniform_f(c
, 1.0);
1681 case PIPE_FUNC_ALWAYS
:
1683 case PIPE_FUNC_EQUAL
:
1684 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1685 c
->discard
= qir_SEL_X_Y_ZS(c
, c
->discard
,
1686 qir_uniform_f(c
, 1.0));
1688 case PIPE_FUNC_NOTEQUAL
:
1689 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1690 c
->discard
= qir_SEL_X_Y_ZC(c
, c
->discard
,
1691 qir_uniform_f(c
, 1.0));
1693 case PIPE_FUNC_GREATER
:
1694 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1695 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1696 qir_uniform_f(c
, 1.0));
1698 case PIPE_FUNC_GEQUAL
:
1699 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1700 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1701 qir_uniform_f(c
, 1.0));
1703 case PIPE_FUNC_LESS
:
1704 qir_SF(c
, qir_FSUB(c
, src_alpha
, alpha_ref
));
1705 c
->discard
= qir_SEL_X_Y_NS(c
, c
->discard
,
1706 qir_uniform_f(c
, 1.0));
1708 case PIPE_FUNC_LEQUAL
:
1709 qir_SF(c
, qir_FSUB(c
, alpha_ref
, src_alpha
));
1710 c
->discard
= qir_SEL_X_Y_NC(c
, c
->discard
,
1711 qir_uniform_f(c
, 1.0));
1717 vc4_logicop(struct vc4_compile
*c
, struct qreg src
, struct qreg dst
)
1719 switch (c
->fs_key
->logicop_func
) {
1720 case PIPE_LOGICOP_CLEAR
:
1721 return qir_uniform_f(c
, 0.0);
1722 case PIPE_LOGICOP_NOR
:
1723 return qir_NOT(c
, qir_OR(c
, src
, dst
));
1724 case PIPE_LOGICOP_AND_INVERTED
:
1725 return qir_AND(c
, qir_NOT(c
, src
), dst
);
1726 case PIPE_LOGICOP_COPY_INVERTED
:
1727 return qir_NOT(c
, src
);
1728 case PIPE_LOGICOP_AND_REVERSE
:
1729 return qir_AND(c
, src
, qir_NOT(c
, dst
));
1730 case PIPE_LOGICOP_INVERT
:
1731 return qir_NOT(c
, dst
);
1732 case PIPE_LOGICOP_XOR
:
1733 return qir_XOR(c
, src
, dst
);
1734 case PIPE_LOGICOP_NAND
:
1735 return qir_NOT(c
, qir_AND(c
, src
, dst
));
1736 case PIPE_LOGICOP_AND
:
1737 return qir_AND(c
, src
, dst
);
1738 case PIPE_LOGICOP_EQUIV
:
1739 return qir_NOT(c
, qir_XOR(c
, src
, dst
));
1740 case PIPE_LOGICOP_NOOP
:
1742 case PIPE_LOGICOP_OR_INVERTED
:
1743 return qir_OR(c
, qir_NOT(c
, src
), dst
);
1744 case PIPE_LOGICOP_OR_REVERSE
:
1745 return qir_OR(c
, src
, qir_NOT(c
, dst
));
1746 case PIPE_LOGICOP_OR
:
1747 return qir_OR(c
, src
, dst
);
1748 case PIPE_LOGICOP_SET
:
1749 return qir_uniform_ui(c
, ~0);
1750 case PIPE_LOGICOP_COPY
:
1757 emit_frag_end(struct vc4_compile
*c
)
1759 clip_distance_discard(c
);
1760 alpha_test_discard(c
);
1762 enum pipe_format color_format
= c
->fs_key
->color_format
;
1763 const uint8_t *format_swiz
= vc4_get_format_swizzle(color_format
);
1764 struct qreg tlb_read_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1765 struct qreg dst_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1766 struct qreg linear_dst_color
[4] = { c
->undef
, c
->undef
, c
->undef
, c
->undef
};
1767 struct qreg packed_dst_color
= c
->undef
;
1769 if (c
->fs_key
->blend
.blend_enable
||
1770 c
->fs_key
->blend
.colormask
!= 0xf ||
1771 c
->fs_key
->logicop_func
!= PIPE_LOGICOP_COPY
) {
1772 struct qreg r4
= qir_TLB_COLOR_READ(c
);
1773 for (int i
= 0; i
< 4; i
++)
1774 tlb_read_color
[i
] = qir_R4_UNPACK(c
, r4
, i
);
1775 for (int i
= 0; i
< 4; i
++) {
1776 dst_color
[i
] = get_swizzled_channel(c
,
1779 if (util_format_is_srgb(color_format
) && i
!= 3) {
1780 linear_dst_color
[i
] =
1781 qir_srgb_decode(c
, dst_color
[i
]);
1783 linear_dst_color
[i
] = dst_color
[i
];
1787 /* Save the packed value for logic ops. Can't reuse r4
1788 * becuase other things might smash it (like sRGB)
1790 packed_dst_color
= qir_MOV(c
, r4
);
1793 struct qreg blend_color
[4];
1794 struct qreg undef_array
[4] = {
1795 c
->undef
, c
->undef
, c
->undef
, c
->undef
1797 vc4_blend(c
, blend_color
, linear_dst_color
,
1798 (c
->output_color_index
!= -1 ?
1799 c
->outputs
+ c
->output_color_index
:
1802 if (util_format_is_srgb(color_format
)) {
1803 for (int i
= 0; i
< 3; i
++)
1804 blend_color
[i
] = qir_srgb_encode(c
, blend_color
[i
]);
1807 /* Debug: Sometimes you're getting a black output and just want to see
1808 * if the FS is getting executed at all. Spam magenta into the color
1812 blend_color
[0] = qir_uniform_f(c
, 1.0);
1813 blend_color
[1] = qir_uniform_f(c
, 0.0);
1814 blend_color
[2] = qir_uniform_f(c
, 1.0);
1815 blend_color
[3] = qir_uniform_f(c
, 0.5);
1818 struct qreg swizzled_outputs
[4];
1819 for (int i
= 0; i
< 4; i
++) {
1820 swizzled_outputs
[i
] = get_swizzled_channel(c
, blend_color
,
1824 if (c
->discard
.file
!= QFILE_NULL
)
1825 qir_TLB_DISCARD_SETUP(c
, c
->discard
);
1827 if (c
->fs_key
->stencil_enabled
) {
1828 qir_TLB_STENCIL_SETUP(c
, add_uniform(c
, QUNIFORM_STENCIL
, 0));
1829 if (c
->fs_key
->stencil_twoside
) {
1830 qir_TLB_STENCIL_SETUP(c
, add_uniform(c
, QUNIFORM_STENCIL
, 1));
1832 if (c
->fs_key
->stencil_full_writemasks
) {
1833 qir_TLB_STENCIL_SETUP(c
, add_uniform(c
, QUNIFORM_STENCIL
, 2));
1837 if (c
->fs_key
->depth_enabled
) {
1839 if (c
->output_position_index
!= -1) {
1840 z
= qir_FTOI(c
, qir_FMUL(c
, c
->outputs
[c
->output_position_index
+ 2],
1841 qir_uniform_f(c
, 0xffffff)));
1845 qir_TLB_Z_WRITE(c
, z
);
1848 struct qreg packed_color
= c
->undef
;
1849 for (int i
= 0; i
< 4; i
++) {
1850 if (swizzled_outputs
[i
].file
== QFILE_NULL
)
1852 if (packed_color
.file
== QFILE_NULL
) {
1853 packed_color
= qir_PACK_8888_F(c
, swizzled_outputs
[i
]);
1855 packed_color
= qir_PACK_8_F(c
,
1857 swizzled_outputs
[i
],
1862 if (packed_color
.file
== QFILE_NULL
)
1863 packed_color
= qir_uniform_ui(c
, 0);
1865 if (c
->fs_key
->logicop_func
!= PIPE_LOGICOP_COPY
) {
1866 packed_color
= vc4_logicop(c
, packed_color
, packed_dst_color
);
1869 /* If the bit isn't set in the color mask, then just return the
1870 * original dst color, instead.
1872 uint32_t colormask
= 0xffffffff;
1873 for (int i
= 0; i
< 4; i
++) {
1874 if (format_swiz
[i
] < 4 &&
1875 !(c
->fs_key
->blend
.colormask
& (1 << format_swiz
[i
]))) {
1876 colormask
&= ~(0xff << (i
* 8));
1879 if (colormask
!= 0xffffffff) {
1880 packed_color
= qir_OR(c
,
1881 qir_AND(c
, packed_color
,
1882 qir_uniform_ui(c
, colormask
)),
1883 qir_AND(c
, packed_dst_color
,
1884 qir_uniform_ui(c
, ~colormask
)));
1887 qir_emit(c
, qir_inst(QOP_TLB_COLOR_WRITE
, c
->undef
,
1888 packed_color
, c
->undef
));
1892 emit_scaled_viewport_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1896 for (int i
= 0; i
< 2; i
++) {
1898 add_uniform(c
, QUNIFORM_VIEWPORT_X_SCALE
+ i
, 0);
1900 xyi
[i
] = qir_FTOI(c
, qir_FMUL(c
,
1902 c
->outputs
[c
->output_position_index
+ i
],
1907 qir_VPM_WRITE(c
, qir_PACK_SCALED(c
, xyi
[0], xyi
[1]));
1911 emit_zs_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1913 struct qreg zscale
= add_uniform(c
, QUNIFORM_VIEWPORT_Z_SCALE
, 0);
1914 struct qreg zoffset
= add_uniform(c
, QUNIFORM_VIEWPORT_Z_OFFSET
, 0);
1916 qir_VPM_WRITE(c
, qir_FADD(c
, qir_FMUL(c
, qir_FMUL(c
,
1917 c
->outputs
[c
->output_position_index
+ 2],
1924 emit_rcp_wc_write(struct vc4_compile
*c
, struct qreg rcp_w
)
1926 qir_VPM_WRITE(c
, rcp_w
);
1930 emit_point_size_write(struct vc4_compile
*c
)
1932 struct qreg point_size
;
1934 if (c
->output_point_size_index
!= -1)
1935 point_size
= c
->outputs
[c
->output_point_size_index
+ 3];
1937 point_size
= qir_uniform_f(c
, 1.0);
1939 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1942 point_size
= qir_FMAX(c
, point_size
, qir_uniform_f(c
, .125));
1944 qir_VPM_WRITE(c
, point_size
);
1948 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1950 * The simulator insists that there be at least one vertex attribute, so
1951 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1952 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1953 * to consume it here.
1956 emit_stub_vpm_read(struct vc4_compile
*c
)
1961 c
->vattr_sizes
[0] = 4;
1962 struct qreg vpm
= { QFILE_VPM
, 0 };
1963 (void)qir_MOV(c
, vpm
);
1968 emit_ucp_clipdistance(struct vc4_compile
*c
)
1971 if (c
->output_clipvertex_index
!= -1)
1972 cv
= c
->output_clipvertex_index
;
1973 else if (c
->output_position_index
!= -1)
1974 cv
= c
->output_position_index
;
1978 for (int plane
= 0; plane
< PIPE_MAX_CLIP_PLANES
; plane
++) {
1979 if (!(c
->key
->ucp_enables
& (1 << plane
)))
1982 /* Pick the next outputs[] that hasn't been written to, since
1983 * there are no other program writes left to be processed at
1984 * this point. If something had been declared but not written
1985 * (like a w component), we'll just smash over the top of it.
1987 uint32_t output_index
= c
->num_outputs
++;
1988 add_output(c
, output_index
,
1989 TGSI_SEMANTIC_CLIPDIST
,
1994 struct qreg dist
= qir_uniform_f(c
, 0.0);
1995 for (int i
= 0; i
< 4; i
++) {
1996 struct qreg pos_chan
= c
->outputs
[cv
+ i
];
1998 add_uniform(c
, QUNIFORM_USER_CLIP_PLANE
,
2000 dist
= qir_FADD(c
, dist
, qir_FMUL(c
, pos_chan
, ucp
));
2003 c
->outputs
[output_index
] = dist
;
2008 emit_vert_end(struct vc4_compile
*c
,
2009 struct vc4_varying_semantic
*fs_inputs
,
2010 uint32_t num_fs_inputs
)
2012 struct qreg rcp_w
= qir_RCP(c
, c
->outputs
[c
->output_position_index
+ 3]);
2014 emit_stub_vpm_read(c
);
2015 emit_ucp_clipdistance(c
);
2017 emit_scaled_viewport_write(c
, rcp_w
);
2018 emit_zs_write(c
, rcp_w
);
2019 emit_rcp_wc_write(c
, rcp_w
);
2020 if (c
->vs_key
->per_vertex_point_size
)
2021 emit_point_size_write(c
);
2023 for (int i
= 0; i
< num_fs_inputs
; i
++) {
2024 struct vc4_varying_semantic
*input
= &fs_inputs
[i
];
2027 for (j
= 0; j
< c
->num_outputs
; j
++) {
2028 struct vc4_varying_semantic
*output
=
2029 &c
->output_semantics
[j
];
2031 if (input
->semantic
== output
->semantic
&&
2032 input
->index
== output
->index
&&
2033 input
->swizzle
== output
->swizzle
) {
2034 qir_VPM_WRITE(c
, c
->outputs
[j
]);
2038 /* Emit padding if we didn't find a declared VS output for
2041 if (j
== c
->num_outputs
)
2042 qir_VPM_WRITE(c
, qir_uniform_f(c
, 0.0));
2047 emit_coord_end(struct vc4_compile
*c
)
2049 struct qreg rcp_w
= qir_RCP(c
, c
->outputs
[c
->output_position_index
+ 3]);
2051 emit_stub_vpm_read(c
);
2053 for (int i
= 0; i
< 4; i
++)
2054 qir_VPM_WRITE(c
, c
->outputs
[c
->output_position_index
+ i
]);
2056 emit_scaled_viewport_write(c
, rcp_w
);
2057 emit_zs_write(c
, rcp_w
);
2058 emit_rcp_wc_write(c
, rcp_w
);
2059 if (c
->vs_key
->per_vertex_point_size
)
2060 emit_point_size_write(c
);
2063 static struct vc4_compile
*
2064 vc4_shader_tgsi_to_qir(struct vc4_context
*vc4
, enum qstage stage
,
2065 struct vc4_key
*key
)
2067 struct vc4_compile
*c
= qir_compile_init();
2071 for (int i
= 0; i
< 4; i
++)
2072 c
->addr
[i
] = qir_uniform_f(c
, 0.0);
2074 c
->shader_state
= &key
->shader_state
->base
;
2075 c
->program_id
= key
->shader_state
->program_id
;
2076 c
->variant_id
= key
->shader_state
->compiled_variant_count
++;
2081 c
->fs_key
= (struct vc4_fs_key
*)key
;
2082 if (c
->fs_key
->is_points
) {
2083 c
->point_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2084 c
->point_y
= emit_fragment_varying(c
, ~0, ~0, 0);
2085 } else if (c
->fs_key
->is_lines
) {
2086 c
->line_x
= emit_fragment_varying(c
, ~0, ~0, 0);
2090 c
->vs_key
= (struct vc4_vs_key
*)key
;
2093 c
->vs_key
= (struct vc4_vs_key
*)key
;
2097 const struct tgsi_token
*tokens
= key
->shader_state
->base
.tokens
;
2098 if (c
->fs_key
&& c
->fs_key
->light_twoside
) {
2099 if (!key
->shader_state
->twoside_tokens
) {
2100 const struct tgsi_lowering_config lowering_config
= {
2101 .color_two_side
= true,
2103 struct tgsi_shader_info info
;
2104 key
->shader_state
->twoside_tokens
=
2105 tgsi_transform_lowering(&lowering_config
,
2106 key
->shader_state
->base
.tokens
,
2109 /* If no transformation occurred, then NULL is
2110 * returned and we just use our original tokens.
2112 if (!key
->shader_state
->twoside_tokens
) {
2113 key
->shader_state
->twoside_tokens
=
2114 key
->shader_state
->base
.tokens
;
2117 tokens
= key
->shader_state
->twoside_tokens
;
2120 ret
= tgsi_parse_init(&c
->parser
, tokens
);
2121 assert(ret
== TGSI_PARSE_OK
);
2123 if (vc4_debug
& VC4_DEBUG_TGSI
) {
2124 fprintf(stderr
, "%s prog %d/%d TGSI:\n",
2125 qir_get_stage_name(c
->stage
),
2126 c
->program_id
, c
->variant_id
);
2127 tgsi_dump(tokens
, 0);
2130 while (!tgsi_parse_end_of_tokens(&c
->parser
)) {
2131 tgsi_parse_token(&c
->parser
);
2133 switch (c
->parser
.FullToken
.Token
.Type
) {
2134 case TGSI_TOKEN_TYPE_DECLARATION
:
2135 emit_tgsi_declaration(c
,
2136 &c
->parser
.FullToken
.FullDeclaration
);
2139 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2140 emit_tgsi_instruction(c
,
2141 &c
->parser
.FullToken
.FullInstruction
);
2144 case TGSI_TOKEN_TYPE_IMMEDIATE
:
2145 parse_tgsi_immediate(c
,
2146 &c
->parser
.FullToken
.FullImmediate
);
2157 vc4
->prog
.fs
->input_semantics
,
2158 vc4
->prog
.fs
->num_inputs
);
2165 tgsi_parse_free(&c
->parser
);
2166 if (vc4_debug
& VC4_DEBUG_QIR
) {
2167 fprintf(stderr
, "%s prog %d/%d pre-opt QIR:\n",
2168 qir_get_stage_name(c
->stage
),
2169 c
->program_id
, c
->variant_id
);
2175 if (vc4_debug
& VC4_DEBUG_QIR
) {
2176 fprintf(stderr
, "%s prog %d/%d QIR:\n",
2177 qir_get_stage_name(c
->stage
),
2178 c
->program_id
, c
->variant_id
);
2181 qir_reorder_uniforms(c
);
2182 vc4_generate_code(vc4
, c
);
2184 if (vc4_debug
& VC4_DEBUG_SHADERDB
) {
2185 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2186 qir_get_stage_name(c
->stage
),
2187 c
->program_id
, c
->variant_id
,
2189 fprintf(stderr
, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2190 qir_get_stage_name(c
->stage
),
2191 c
->program_id
, c
->variant_id
,
2199 vc4_shader_state_create(struct pipe_context
*pctx
,
2200 const struct pipe_shader_state
*cso
)
2202 struct vc4_context
*vc4
= vc4_context(pctx
);
2203 struct vc4_uncompiled_shader
*so
= CALLOC_STRUCT(vc4_uncompiled_shader
);
2207 const struct tgsi_lowering_config lowering_config
= {
2222 struct tgsi_shader_info info
;
2223 so
->base
.tokens
= tgsi_transform_lowering(&lowering_config
, cso
->tokens
, &info
);
2224 if (!so
->base
.tokens
)
2225 so
->base
.tokens
= tgsi_dup_tokens(cso
->tokens
);
2226 so
->program_id
= vc4
->next_uncompiled_program_id
++;
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;
}

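/* Looks up (or compiles and caches) the shader variant for the given stage
 * and key.  The per-stage hash tables are keyed on the full vc4_fs_key or
 * vc4_vs_key contents, so any state change that affects codegen produces a
 * distinct variant.
 */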
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_tgsi_to_qir(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_semantics];
                struct simple_node *node;

                memset(input_live, 0, sizeof(input_live));
                foreach(node, &c->instructions) {
                        struct qinst *inst = (struct qinst *)node;
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_semantics = ralloc_array(shader,
                                                       struct vc4_varying_semantic,
                                                       c->num_input_semantics);

                for (int i = 0; i < c->num_input_semantics; i++) {
                        struct vc4_varying_semantic *sem = &c->input_semantics[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (sem->semantic == (uint8_t)~0)
                                continue;

                        if (sem->semantic == TGSI_SEMANTIC_COLOR ||
                            sem->semantic == TGSI_SEMANTIC_BCOLOR) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_semantics[shader->num_inputs] = *sem;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }
        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_mem(vc4->screen, c->qpu_insts,
                                      c->qpu_inst_count * sizeof(uint64_t),
                                      "code");

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->ubo_ranges_array_size; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;
                key->tex[i].compare_mode = sampler_state->compare_mode;
                key->tex[i].compare_func = sampler_state->compare_func;
                key->tex[i].wrap_s = sampler_state->wrap_s;
                key->tex[i].wrap_t = sampler_state->wrap_t;
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

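/* Gathers all of the non-shader-object state that affects fragment shader
 * codegen (framebuffer format, blend/logic op, depth/stencil/alpha, point and
 * line handling) into a vc4_fs_key, then binds the matching compiled variant.
 */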
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        vc4->prog.vs = vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        key->is_coord = true;
        vc4->prog.cs = vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        if (so->twoside_tokens != so->base.tokens)
                free((void *)so->twoside_tokens);
        free((void *)so->base.tokens);
        free(so);
}

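/* Translates a pipe_tex_wrap mode into the hardware's wrap-mode field
 * encoding.  Legacy GL_CLAMP (PIPE_TEX_WRAP_CLAMP) has no direct equivalent,
 * so the encoding picked depends on whether nearest filtering is in use.
 */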
static uint32_t translate_wrap(uint32_t p_wrap, bool using_nearest)
{
        switch (p_wrap) {
        case PIPE_TEX_WRAP_REPEAT:
                return 0;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return 1;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return 2;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return 3;
        case PIPE_TEX_WRAP_CLAMP:
                return (using_nearest ? 1 : 3);
        default:
                fprintf(stderr, "Unknown wrap mode %d\n", p_wrap);
                assert(!"not reached");
                return 0;
        }
}

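/* The write_texture_p0/p1/p2 helpers emit the texture config parameter words
 * into the uniform stream as texture samples are set up; P0 carries the
 * texture base address, so it is emitted as a relocation against the
 * resource's BO rather than a plain dword.
 */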
static void
write_texture_p0(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_reloc(vc4, &vc4->uniforms, rsc->bo,
                 VC4_SET_FIELD(rsc->slices[0].offset >> 12, VC4_TEX_P0_OFFSET) |
                 VC4_SET_FIELD(texture->u.tex.last_level -
                               texture->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
                 VC4_SET_FIELD(texture->target == PIPE_TEXTURE_CUBE,
                               VC4_TEX_P0_CMMODE) |
                 VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE));
}

static void
write_texture_p1(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        static const uint8_t minfilter_map[6] = {
                VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR,
                VC4_TEX_P1_MINFILT_LIN_MIP_NEAR,
                VC4_TEX_P1_MINFILT_NEAR_MIP_LIN,
                VC4_TEX_P1_MINFILT_LIN_MIP_LIN,
                VC4_TEX_P1_MINFILT_NEAREST,
                VC4_TEX_P1_MINFILT_LINEAR,
        };
        static const uint32_t magfilter_map[] = {
                [PIPE_TEX_FILTER_NEAREST] = VC4_TEX_P1_MAGFILT_NEAREST,
                [PIPE_TEX_FILTER_LINEAR] = VC4_TEX_P1_MAGFILT_LINEAR,
        };

        bool either_nearest =
                (sampler->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
                 sampler->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);

        cl_aligned_u32(&vc4->uniforms,
                       VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
                       VC4_SET_FIELD(texture->texture->height0 & 2047,
                                     VC4_TEX_P1_HEIGHT) |
                       VC4_SET_FIELD(texture->texture->width0 & 2047,
                                     VC4_TEX_P1_WIDTH) |
                       VC4_SET_FIELD(magfilter_map[sampler->mag_img_filter],
                                     VC4_TEX_P1_MAGFILT) |
                       VC4_SET_FIELD(minfilter_map[sampler->min_mip_filter * 2 +
                                                   sampler->min_img_filter],
                                     VC4_TEX_P1_MINFILT) |
                       VC4_SET_FIELD(translate_wrap(sampler->wrap_s, either_nearest),
                                     VC4_TEX_P1_WRAP_S) |
                       VC4_SET_FIELD(translate_wrap(sampler->wrap_t, either_nearest),
                                     VC4_TEX_P1_WRAP_T));
}

static void
write_texture_p2(struct vc4_context *vc4,
                 struct vc4_texture_stateobj *texstate,
                 uint32_t data)
{
        uint32_t unit = data & 0xffff;
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);

        cl_aligned_u32(&vc4->uniforms,
                       VC4_SET_FIELD(VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE,
                                     VC4_TEX_P2_PTYPE) |
                       VC4_SET_FIELD(rsc->cube_map_stride >> 12, VC4_TEX_P2_CMST) |
                       VC4_SET_FIELD((data >> 16) & 1, VC4_TEX_P2_BSLOD));
}

#define SWIZ(x,y,z,w) {          \
        UTIL_FORMAT_SWIZZLE_##x, \
        UTIL_FORMAT_SWIZZLE_##y, \
        UTIL_FORMAT_SWIZZLE_##z, \
        UTIL_FORMAT_SWIZZLE_##w  \
}

static void
write_texture_border_color(struct vc4_context *vc4,
                           struct vc4_texture_stateobj *texstate,
                           uint32_t unit)
{
        struct pipe_sampler_state *sampler = texstate->samplers[unit];
        struct pipe_sampler_view *texture = texstate->textures[unit];
        struct vc4_resource *rsc = vc4_resource(texture->texture);
        union util_color uc;

        const struct util_format_description *tex_format_desc =
                util_format_description(texture->format);

        float border_color[4];
        for (int i = 0; i < 4; i++)
                border_color[i] = sampler->border_color.f[i];
        if (util_format_is_srgb(texture->format)) {
                for (int i = 0; i < 3; i++)
                        border_color[i] =
                                util_format_linear_to_srgb_float(border_color[i]);
        }

        /* Turn the border color into the layout of channels that it would
         * have when stored as texture contents.
         */
        float storage_color[4];
        util_format_unswizzle_4f(storage_color,
                                 border_color,
                                 tex_format_desc->swizzle);

        /* Now, pack so that when the vc4_format-sampled texture contents are
         * replaced with our border color, the vc4_get_format_swizzle()
         * swizzling will get the right channels.
         */
        if (util_format_is_depth_or_stencil(texture->format)) {
                uc.ui[0] = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                       sampler->border_color.f[0]) << 8;
        } else {
                switch (rsc->vc4_format) {
                case VC4_TEXTURE_TYPE_RGBA8888:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_R8G8B8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGBA4444:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_A8B8G8R8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_RGB565:
                        util_pack_color(storage_color,
                                        PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
                        break;
                case VC4_TEXTURE_TYPE_ALPHA:
                        uc.ui[0] = float_to_ubyte(storage_color[0]) << 24;
                        break;
                case VC4_TEXTURE_TYPE_LUMALPHA:
                        uc.ui[0] = ((float_to_ubyte(storage_color[1]) << 24) |
                                    (float_to_ubyte(storage_color[0]) << 0));
                        break;
                }
        }

        cl_aligned_u32(&vc4->uniforms, uc.ui[0]);
}

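/* Returns 1/width or 1/height of a RECT texture as a raw 32-bit float, used
 * by the compiler-generated TEXRECT_SCALE uniforms to scale unnormalized
 * texture-rectangle coordinates down to the normalized coordinates the
 * hardware samples with.
 */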
static uint32_t
get_texrect_scale(struct vc4_texture_stateobj *texstate,
                  enum quniform_contents contents,
                  uint32_t data)
{
        struct pipe_sampler_view *texture = texstate->textures[data];
        uint32_t dim;

        if (contents == QUNIFORM_TEXRECT_SCALE_X)
                dim = texture->texture->width0;
        else
                dim = texture->texture->height0;

        return fui(1.0f / dim);
}

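/* Uploads the uniform ranges that the compiler saw accessed indirectly into a
 * temporary BO for this draw, copying them out of the gallium constant buffer
 * at the src/dst offsets recorded in the compiled shader.
 */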
static struct vc4_bo *
vc4_upload_ubo(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
               const uint32_t *gallium_uniforms)
{
        if (!shader->ubo_size)
                return NULL;

        struct vc4_bo *ubo = vc4_bo_alloc(vc4->screen, shader->ubo_size, "ubo");
        uint32_t *data = vc4_bo_map(ubo);
        for (uint32_t i = 0; i < shader->num_ubo_ranges; i++) {
                memcpy(data + shader->ubo_ranges[i].dst_offset,
                       gallium_uniforms + shader->ubo_ranges[i].src_offset,
                       shader->ubo_ranges[i].size);
        }

        return ubo;
}

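/* Writes the shader's uniform stream for the current draw, walking the
 * quniform_contents/data arrays recorded at compile time and emitting one
 * dword (or relocation) per entry in the order the QPU program will read
 * them.
 */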
void
vc4_write_uniforms(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
                   struct vc4_constbuf_stateobj *cb,
                   struct vc4_texture_stateobj *texstate)
{
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
        struct vc4_bo *ubo = vc4_upload_ubo(vc4, shader, gallium_uniforms);

        cl_ensure_space(&vc4->uniforms, (uinfo->count +
                                         uinfo->num_texture_samples) * 4);

        cl_start_shader_reloc(&vc4->uniforms, uinfo->num_texture_samples);

        for (int i = 0; i < uinfo->count; i++) {
                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_aligned_u32(&vc4->uniforms, uinfo->data[i]);
                        break;
                case QUNIFORM_UNIFORM:
                        cl_aligned_u32(&vc4->uniforms,
                                       gallium_uniforms[uinfo->data[i]]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
                        break;
                case QUNIFORM_VIEWPORT_Y_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
                        break;

                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        cl_aligned_f(&vc4->uniforms,
                                     vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P0:
                        write_texture_p0(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                        write_texture_p1(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P2:
                        write_texture_p2(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_UBO_ADDR:
                        cl_aligned_reloc(vc4, &vc4->uniforms, ubo, 0);
                        break;

                case QUNIFORM_TEXTURE_BORDER_COLOR:
                        write_texture_border_color(vc4, texstate, uinfo->data[i]);
                        break;

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_aligned_u32(&vc4->uniforms,
                                       get_texrect_scale(texstate,
                                                         uinfo->contents[i],
                                                         uinfo->data[i]));
                        break;

                case QUNIFORM_BLEND_CONST_COLOR:
                        cl_aligned_f(&vc4->uniforms,
                                     CLAMP(vc4->blend_color.color[uinfo->data[i]], 0, 1));
                        break;

                case QUNIFORM_STENCIL:
                        cl_aligned_u32(&vc4->uniforms,
                                       vc4->zsa->stencil_uniforms[uinfo->data[i]] |
                                       (uinfo->data[i] <= 1 ?
                                        (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
                                        0));
                        break;

                case QUNIFORM_ALPHA_REF:
                        cl_aligned_f(&vc4->uniforms,
                                     vc4->zsa->base.alpha.ref_value);
                        break;
                }

                uint32_t written_val = *(uint32_t *)(vc4->uniforms.next - 4);
                fprintf(stderr, "%p: %d / 0x%08x (%f)\n",
                        shader, i, written_val, uif(written_val));
        }
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}