/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "vc4_qir.h"
#include "kernel/vc4_packet.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"
/** @file vc4_nir_lower_txf_ms.c
 * Walks the NIR generated by TGSI-to-NIR to lower its nir_texop_txf_ms
 * coordinates to do the math necessary and use a plain nir_texop_txf instead.
 *
 * MSAA textures are laid out as 32x32-aligned blocks of RGBA8888 or Z24S8.
 * We can't load them through the normal sampler path because of the lack of
 * linear support in the hardware.  So, we treat MSAA textures as a giant UBO
 * and do the math in the shader.
 */
40 vc4_nir_lower_txf_ms_instr(struct vc4_compile
*c
, nir_builder
*b
,
41 nir_tex_instr
*txf_ms
)
43 if (txf_ms
->op
!= nir_texop_txf_ms
)
46 b
->cursor
= nir_before_instr(&txf_ms
->instr
);
48 nir_tex_instr
*txf
= nir_tex_instr_create(c
->s
, 1);
49 txf
->op
= nir_texop_txf
;
50 txf
->sampler
= txf_ms
->sampler
;
51 txf
->sampler_index
= txf_ms
->sampler_index
;
52 txf
->coord_components
= txf_ms
->coord_components
;
53 txf
->is_shadow
= txf_ms
->is_shadow
;
54 txf
->is_new_style_shadow
= txf_ms
->is_new_style_shadow
;
56 nir_ssa_def
*coord
= NULL
, *sample_index
= NULL
;
57 for (int i
= 0; i
< txf_ms
->num_srcs
; i
++) {
58 assert(txf_ms
->src
[i
].src
.is_ssa
);
60 switch (txf_ms
->src
[i
].src_type
) {
61 case nir_tex_src_coord
:
62 coord
= txf_ms
->src
[i
].src
.ssa
;
64 case nir_tex_src_ms_index
:
65 sample_index
= txf_ms
->src
[i
].src
.ssa
;
68 unreachable("Unknown txf_ms src\n");
74 nir_ssa_def
*x
= nir_channel(b
, coord
, 0);
75 nir_ssa_def
*y
= nir_channel(b
, coord
, 1);
79 uint32_t tile_w_shift
= 5;
80 uint32_t tile_h_shift
= 5;
81 uint32_t tile_size
= (tile_h
* tile_w
*
82 VC4_MAX_SAMPLES
* sizeof(uint32_t));
83 unsigned unit
= txf_ms
->sampler_index
;
84 uint32_t w
= align(c
->key
->tex
[unit
].msaa_width
, tile_w
);
85 uint32_t w_tiles
= w
/ tile_w
;
87 nir_ssa_def
*x_tile
= nir_ushr(b
, x
, nir_imm_int(b
, tile_w_shift
));
88 nir_ssa_def
*y_tile
= nir_ushr(b
, y
, nir_imm_int(b
, tile_h_shift
));
89 nir_ssa_def
*tile_addr
= nir_iadd(b
,
91 nir_imm_int(b
, tile_size
)),
93 nir_imm_int(b
, (w_tiles
*
95 nir_ssa_def
*x_subspan
= nir_iand(b
, x
,
96 nir_imm_int(b
, (tile_w
- 1) & ~1));
97 nir_ssa_def
*y_subspan
= nir_iand(b
, y
,
98 nir_imm_int(b
, (tile_h
- 1) & ~1));
99 nir_ssa_def
*subspan_addr
= nir_iadd(b
,
100 nir_imul(b
, x_subspan
,
101 nir_imm_int(b
, 2 * VC4_MAX_SAMPLES
* sizeof(uint32_t))),
102 nir_imul(b
, y_subspan
,
108 nir_ssa_def
*pixel_addr
= nir_ior(b
,
112 nir_imm_int(b
, (1 << 2))),
116 nir_imm_int(b
, (1 << 3))));
118 nir_ssa_def
*sample_addr
= nir_ishl(b
, sample_index
, nir_imm_int(b
, 4));
120 nir_ssa_def
*addr
= nir_iadd(b
,
121 nir_ior(b
, sample_addr
, pixel_addr
),
122 nir_iadd(b
, subspan_addr
, tile_addr
));
124 txf
->src
[0].src_type
= nir_tex_src_coord
;
125 txf
->src
[0].src
= nir_src_for_ssa(nir_vec2(b
, addr
, nir_imm_int(b
, 0)));
126 nir_ssa_dest_init(&txf
->instr
, &txf
->dest
, 4, NULL
);
127 nir_builder_instr_insert(b
, &txf
->instr
);
128 nir_ssa_def_rewrite_uses(&txf_ms
->dest
.ssa
,
129 nir_src_for_ssa(&txf
->dest
.ssa
));
130 nir_instr_remove(&txf_ms
->instr
);
134 vc4_nir_lower_txf_ms_block(nir_block
*block
, void *arg
)
136 struct vc4_compile
*c
= arg
;
137 nir_function_impl
*impl
=
138 nir_cf_node_get_function(&block
->cf_node
);
141 nir_builder_init(&b
, impl
);
143 nir_foreach_instr_safe(block
, instr
) {
144 if (instr
->type
== nir_instr_type_tex
) {
145 vc4_nir_lower_txf_ms_instr(c
, &b
,
146 nir_instr_as_tex(instr
));
154 vc4_nir_lower_txf_ms_impl(struct vc4_compile
*c
, nir_function_impl
*impl
)
156 nir_foreach_block(impl
, vc4_nir_lower_txf_ms_block
, c
);
158 nir_metadata_preserve(impl
,
159 nir_metadata_block_index
|
160 nir_metadata_dominance
);
166 vc4_nir_lower_txf_ms(struct vc4_compile
*c
)
168 nir_foreach_function(c
->s
, function
) {
170 vc4_nir_lower_txf_ms_impl(c
, function
->impl
);