2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "main/context.h"
25 #include "main/teximage.h"
26 #include "main/fbobject.h"
28 #include "compiler/nir/nir_builder.h"
30 #include "intel_fbo.h"
32 #include "brw_blorp.h"
33 #include "brw_context.h"
34 #include "brw_blorp_blit_eu.h"
35 #include "brw_state.h"
36 #include "brw_meta_util.h"
38 #define FILE_DEBUG_FLAG DEBUG_BLORP
40 static struct intel_mipmap_tree
*
41 find_miptree(GLbitfield buffer_bit
, struct intel_renderbuffer
*irb
)
43 struct intel_mipmap_tree
*mt
= irb
->mt
;
44 if (buffer_bit
== GL_STENCIL_BUFFER_BIT
&& mt
->stencil_mt
)
50 blorp_get_texture_swizzle(const struct intel_renderbuffer
*irb
)
52 return irb
->Base
.Base
._BaseFormat
== GL_RGB
?
53 MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_ONE
) :
58 do_blorp_blit(struct brw_context
*brw
, GLbitfield buffer_bit
,
59 struct intel_renderbuffer
*src_irb
, mesa_format src_format
,
60 struct intel_renderbuffer
*dst_irb
, mesa_format dst_format
,
61 GLfloat srcX0
, GLfloat srcY0
, GLfloat srcX1
, GLfloat srcY1
,
62 GLfloat dstX0
, GLfloat dstY0
, GLfloat dstX1
, GLfloat dstY1
,
63 GLenum filter
, bool mirror_x
, bool mirror_y
)
65 /* Find source/dst miptrees */
66 struct intel_mipmap_tree
*src_mt
= find_miptree(buffer_bit
, src_irb
);
67 struct intel_mipmap_tree
*dst_mt
= find_miptree(buffer_bit
, dst_irb
);
69 const bool es3
= _mesa_is_gles3(&brw
->ctx
);
71 brw_blorp_blit_miptrees(brw
,
72 src_mt
, src_irb
->mt_level
, src_irb
->mt_layer
,
73 src_format
, blorp_get_texture_swizzle(src_irb
),
74 dst_mt
, dst_irb
->mt_level
, dst_irb
->mt_layer
,
76 srcX0
, srcY0
, srcX1
, srcY1
,
77 dstX0
, dstY0
, dstX1
, dstY1
,
78 filter
, mirror_x
, mirror_y
,
81 dst_irb
->need_downsample
= true;
85 try_blorp_blit(struct brw_context
*brw
,
86 const struct gl_framebuffer
*read_fb
,
87 const struct gl_framebuffer
*draw_fb
,
88 GLfloat srcX0
, GLfloat srcY0
, GLfloat srcX1
, GLfloat srcY1
,
89 GLfloat dstX0
, GLfloat dstY0
, GLfloat dstX1
, GLfloat dstY1
,
90 GLenum filter
, GLbitfield buffer_bit
)
92 struct gl_context
*ctx
= &brw
->ctx
;
94 /* Sync up the state of window system buffers. We need to do this before
95 * we go looking for the buffers.
97 intel_prepare_render(brw
);
99 bool mirror_x
, mirror_y
;
100 if (brw_meta_mirror_clip_and_scissor(ctx
, read_fb
, draw_fb
,
101 &srcX0
, &srcY0
, &srcX1
, &srcY1
,
102 &dstX0
, &dstY0
, &dstX1
, &dstY1
,
103 &mirror_x
, &mirror_y
))
107 struct intel_renderbuffer
*src_irb
;
108 struct intel_renderbuffer
*dst_irb
;
109 struct intel_mipmap_tree
*src_mt
;
110 struct intel_mipmap_tree
*dst_mt
;
111 switch (buffer_bit
) {
112 case GL_COLOR_BUFFER_BIT
:
113 src_irb
= intel_renderbuffer(read_fb
->_ColorReadBuffer
);
114 for (unsigned i
= 0; i
< draw_fb
->_NumColorDrawBuffers
; ++i
) {
115 dst_irb
= intel_renderbuffer(draw_fb
->_ColorDrawBuffers
[i
]);
117 do_blorp_blit(brw
, buffer_bit
,
118 src_irb
, src_irb
->Base
.Base
.Format
,
119 dst_irb
, dst_irb
->Base
.Base
.Format
,
120 srcX0
, srcY0
, srcX1
, srcY1
,
121 dstX0
, dstY0
, dstX1
, dstY1
,
122 filter
, mirror_x
, mirror_y
);
125 case GL_DEPTH_BUFFER_BIT
:
127 intel_renderbuffer(read_fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
);
129 intel_renderbuffer(draw_fb
->Attachment
[BUFFER_DEPTH
].Renderbuffer
);
130 src_mt
= find_miptree(buffer_bit
, src_irb
);
131 dst_mt
= find_miptree(buffer_bit
, dst_irb
);
133 /* We can't handle format conversions between Z24 and other formats
134 * since we have to lie about the surface format. See the comments in
135 * brw_blorp_surface_info::set().
137 if ((src_mt
->format
== MESA_FORMAT_Z24_UNORM_X8_UINT
) !=
138 (dst_mt
->format
== MESA_FORMAT_Z24_UNORM_X8_UINT
))
141 do_blorp_blit(brw
, buffer_bit
, src_irb
, MESA_FORMAT_NONE
,
142 dst_irb
, MESA_FORMAT_NONE
, srcX0
, srcY0
,
143 srcX1
, srcY1
, dstX0
, dstY0
, dstX1
, dstY1
,
144 filter
, mirror_x
, mirror_y
);
146 case GL_STENCIL_BUFFER_BIT
:
148 intel_renderbuffer(read_fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
);
150 intel_renderbuffer(draw_fb
->Attachment
[BUFFER_STENCIL
].Renderbuffer
);
151 do_blorp_blit(brw
, buffer_bit
, src_irb
, MESA_FORMAT_NONE
,
152 dst_irb
, MESA_FORMAT_NONE
, srcX0
, srcY0
,
153 srcX1
, srcY1
, dstX0
, dstY0
, dstX1
, dstY1
,
154 filter
, mirror_x
, mirror_y
);
157 unreachable("not reached");
164 brw_blorp_copytexsubimage(struct brw_context
*brw
,
165 struct gl_renderbuffer
*src_rb
,
166 struct gl_texture_image
*dst_image
,
168 int srcX0
, int srcY0
,
169 int dstX0
, int dstY0
,
170 int width
, int height
)
172 struct gl_context
*ctx
= &brw
->ctx
;
173 struct intel_renderbuffer
*src_irb
= intel_renderbuffer(src_rb
);
174 struct intel_texture_image
*intel_image
= intel_texture_image(dst_image
);
176 /* No pixel transfer operations (zoom, bias, mapping), just a blit */
177 if (brw
->ctx
._ImageTransferState
)
180 /* Sync up the state of window system buffers. We need to do this before
181 * we go looking at the src renderbuffer's miptree.
183 intel_prepare_render(brw
);
185 struct intel_mipmap_tree
*src_mt
= src_irb
->mt
;
186 struct intel_mipmap_tree
*dst_mt
= intel_image
->mt
;
188 /* There is support for only up to eight samples. */
189 if (src_mt
->num_samples
> 8 || dst_mt
->num_samples
> 8)
192 /* BLORP is only supported from Gen6 onwards. */
196 if (_mesa_get_format_base_format(src_rb
->Format
) !=
197 _mesa_get_format_base_format(dst_image
->TexFormat
)) {
201 /* We can't handle format conversions between Z24 and other formats since
202 * we have to lie about the surface format. See the comments in
203 * brw_blorp_surface_info::set().
205 if ((src_mt
->format
== MESA_FORMAT_Z24_UNORM_X8_UINT
) !=
206 (dst_mt
->format
== MESA_FORMAT_Z24_UNORM_X8_UINT
)) {
210 if (!brw
->format_supported_as_render_target
[dst_image
->TexFormat
])
213 /* Source clipping shouldn't be necessary, since copytexsubimage (in
214 * src/mesa/main/teximage.c) calls _mesa_clip_copytexsubimage() which
217 * Destination clipping shouldn't be necessary since the restrictions on
218 * glCopyTexSubImage prevent the user from specifying a destination rectangle
219 * that falls outside the bounds of the destination texture.
220 * See error_check_subtexture_dimensions().
223 int srcY1
= srcY0
+ height
;
224 int srcX1
= srcX0
+ width
;
225 int dstX1
= dstX0
+ width
;
226 int dstY1
= dstY0
+ height
;
228 /* Account for the fact that in the system framebuffer, the origin is at
231 bool mirror_y
= false;
232 if (_mesa_is_winsys_fbo(ctx
->ReadBuffer
)) {
233 GLint tmp
= src_rb
->Height
- srcY0
;
234 srcY0
= src_rb
->Height
- srcY1
;
239 /* Account for face selection and texture view MinLayer */
240 int dst_slice
= slice
+ dst_image
->TexObject
->MinLayer
+ dst_image
->Face
;
241 int dst_level
= dst_image
->Level
+ dst_image
->TexObject
->MinLevel
;
243 brw_blorp_blit_miptrees(brw
,
244 src_mt
, src_irb
->mt_level
, src_irb
->mt_layer
,
245 src_rb
->Format
, blorp_get_texture_swizzle(src_irb
),
246 dst_mt
, dst_level
, dst_slice
,
247 dst_image
->TexFormat
,
248 srcX0
, srcY0
, srcX1
, srcY1
,
249 dstX0
, dstY0
, dstX1
, dstY1
,
250 GL_NEAREST
, false, mirror_y
,
253 /* If we're copying to a packed depth stencil texture and the source
254 * framebuffer has separate stencil, we need to also copy the stencil data
257 src_rb
= ctx
->ReadBuffer
->Attachment
[BUFFER_STENCIL
].Renderbuffer
;
258 if (_mesa_get_format_bits(dst_image
->TexFormat
, GL_STENCIL_BITS
) > 0 &&
260 src_irb
= intel_renderbuffer(src_rb
);
261 src_mt
= src_irb
->mt
;
263 if (src_mt
->stencil_mt
)
264 src_mt
= src_mt
->stencil_mt
;
265 if (dst_mt
->stencil_mt
)
266 dst_mt
= dst_mt
->stencil_mt
;
268 if (src_mt
!= dst_mt
) {
269 brw_blorp_blit_miptrees(brw
,
270 src_mt
, src_irb
->mt_level
, src_irb
->mt_layer
,
272 blorp_get_texture_swizzle(src_irb
),
273 dst_mt
, dst_level
, dst_slice
,
275 srcX0
, srcY0
, srcX1
, srcY1
,
276 dstX0
, dstY0
, dstX1
, dstY1
,
277 GL_NEAREST
, false, mirror_y
,
287 brw_blorp_framebuffer(struct brw_context
*brw
,
288 struct gl_framebuffer
*readFb
,
289 struct gl_framebuffer
*drawFb
,
290 GLint srcX0
, GLint srcY0
, GLint srcX1
, GLint srcY1
,
291 GLint dstX0
, GLint dstY0
, GLint dstX1
, GLint dstY1
,
292 GLbitfield mask
, GLenum filter
)
294 /* BLORP is not supported before Gen6. */
298 /* There is support for only up to eight samples. */
299 if (readFb
->Visual
.samples
> 8 || drawFb
->Visual
.samples
> 8)
302 static GLbitfield buffer_bits
[] = {
305 GL_STENCIL_BUFFER_BIT
,
308 for (unsigned int i
= 0; i
< ARRAY_SIZE(buffer_bits
); ++i
) {
309 if ((mask
& buffer_bits
[i
]) &&
310 try_blorp_blit(brw
, readFb
, drawFb
,
311 srcX0
, srcY0
, srcX1
, srcY1
,
312 dstX0
, dstY0
, dstX1
, dstY1
,
313 filter
, buffer_bits
[i
])) {
314 mask
&= ~buffer_bits
[i
];
323 * Enum to specify the order of arguments in a sampler message
325 enum sampler_message_arg
327 SAMPLER_MESSAGE_ARG_U_FLOAT
,
328 SAMPLER_MESSAGE_ARG_V_FLOAT
,
329 SAMPLER_MESSAGE_ARG_U_INT
,
330 SAMPLER_MESSAGE_ARG_V_INT
,
331 SAMPLER_MESSAGE_ARG_R_INT
,
332 SAMPLER_MESSAGE_ARG_SI_INT
,
333 SAMPLER_MESSAGE_ARG_MCS_INT
,
334 SAMPLER_MESSAGE_ARG_ZERO_INT
,
337 struct brw_blorp_blit_vars
{
338 /* Uniforms values from brw_blorp_wm_push_constants */
339 nir_variable
*u_dst_x0
;
340 nir_variable
*u_dst_x1
;
341 nir_variable
*u_dst_y0
;
342 nir_variable
*u_dst_y1
;
343 nir_variable
*u_rect_grid_x1
;
344 nir_variable
*u_rect_grid_y1
;
346 nir_variable
*multiplier
;
347 nir_variable
*offset
;
348 } u_x_transform
, u_y_transform
;
349 nir_variable
*u_src_z
;
352 nir_variable
*frag_coord
;
355 nir_variable
*color_out
;
359 brw_blorp_blit_vars_init(nir_builder
*b
, struct brw_blorp_blit_vars
*v
,
360 const struct brw_blorp_blit_prog_key
*key
)
362 #define LOAD_UNIFORM(name, type)\
363 v->u_##name = nir_variable_create(b->shader, nir_var_uniform, type, #name); \
364 v->u_##name->data.location = \
365 offsetof(struct brw_blorp_wm_push_constants, name);
367 LOAD_UNIFORM(dst_x0
, glsl_uint_type())
368 LOAD_UNIFORM(dst_x1
, glsl_uint_type())
369 LOAD_UNIFORM(dst_y0
, glsl_uint_type())
370 LOAD_UNIFORM(dst_y1
, glsl_uint_type())
371 LOAD_UNIFORM(rect_grid_x1
, glsl_float_type())
372 LOAD_UNIFORM(rect_grid_y1
, glsl_float_type())
373 LOAD_UNIFORM(x_transform
.multiplier
, glsl_float_type())
374 LOAD_UNIFORM(x_transform
.offset
, glsl_float_type())
375 LOAD_UNIFORM(y_transform
.multiplier
, glsl_float_type())
376 LOAD_UNIFORM(y_transform
.offset
, glsl_float_type())
377 LOAD_UNIFORM(src_z
, glsl_uint_type())
381 v
->frag_coord
= nir_variable_create(b
->shader
, nir_var_shader_in
,
382 glsl_vec4_type(), "gl_FragCoord");
383 v
->frag_coord
->data
.location
= VARYING_SLOT_POS
;
384 v
->frag_coord
->data
.origin_upper_left
= true;
386 v
->color_out
= nir_variable_create(b
->shader
, nir_var_shader_out
,
387 glsl_vec4_type(), "gl_FragColor");
388 v
->color_out
->data
.location
= FRAG_RESULT_COLOR
;
392 blorp_blit_get_frag_coords(nir_builder
*b
,
393 const struct brw_blorp_blit_prog_key
*key
,
394 struct brw_blorp_blit_vars
*v
)
396 nir_ssa_def
*coord
= nir_f2i(b
, nir_load_var(b
, v
->frag_coord
));
398 if (key
->persample_msaa_dispatch
) {
399 return nir_vec3(b
, nir_channel(b
, coord
, 0), nir_channel(b
, coord
, 1),
400 nir_load_system_value(b
, nir_intrinsic_load_sample_id
, 0));
402 return nir_vec2(b
, nir_channel(b
, coord
, 0), nir_channel(b
, coord
, 1));
407 * Emit code to translate from destination (X, Y) coordinates to source (X, Y)
411 blorp_blit_apply_transform(nir_builder
*b
, nir_ssa_def
*src_pos
,
412 struct brw_blorp_blit_vars
*v
)
414 nir_ssa_def
*offset
= nir_vec2(b
, nir_load_var(b
, v
->u_x_transform
.offset
),
415 nir_load_var(b
, v
->u_y_transform
.offset
));
416 nir_ssa_def
*mul
= nir_vec2(b
, nir_load_var(b
, v
->u_x_transform
.multiplier
),
417 nir_load_var(b
, v
->u_y_transform
.multiplier
));
419 nir_ssa_def
*pos
= nir_ffma(b
, src_pos
, mul
, offset
);
421 if (src_pos
->num_components
== 3) {
422 /* Leave the sample id alone */
423 pos
= nir_vec3(b
, nir_channel(b
, pos
, 0), nir_channel(b
, pos
, 1),
424 nir_channel(b
, src_pos
, 2));
431 blorp_nir_discard_if_outside_rect(nir_builder
*b
, nir_ssa_def
*pos
,
432 struct brw_blorp_blit_vars
*v
)
434 nir_ssa_def
*c0
, *c1
, *c2
, *c3
;
435 c0
= nir_ult(b
, nir_channel(b
, pos
, 0), nir_load_var(b
, v
->u_dst_x0
));
436 c1
= nir_uge(b
, nir_channel(b
, pos
, 0), nir_load_var(b
, v
->u_dst_x1
));
437 c2
= nir_ult(b
, nir_channel(b
, pos
, 1), nir_load_var(b
, v
->u_dst_y0
));
438 c3
= nir_uge(b
, nir_channel(b
, pos
, 1), nir_load_var(b
, v
->u_dst_y1
));
439 nir_ssa_def
*oob
= nir_ior(b
, nir_ior(b
, c0
, c1
), nir_ior(b
, c2
, c3
));
441 nir_intrinsic_instr
*discard
=
442 nir_intrinsic_instr_create(b
->shader
, nir_intrinsic_discard_if
);
443 discard
->src
[0] = nir_src_for_ssa(oob
);
444 nir_builder_instr_insert(b
, &discard
->instr
);
447 static nir_tex_instr
*
448 blorp_create_nir_tex_instr(nir_shader
*shader
, nir_texop op
,
449 nir_ssa_def
*pos
, unsigned num_srcs
,
450 enum brw_reg_type dst_type
)
452 nir_tex_instr
*tex
= nir_tex_instr_create(shader
, num_srcs
);
457 case BRW_REGISTER_TYPE_F
:
458 tex
->dest_type
= nir_type_float
;
460 case BRW_REGISTER_TYPE_D
:
461 tex
->dest_type
= nir_type_int
;
463 case BRW_REGISTER_TYPE_UD
:
464 tex
->dest_type
= nir_type_uint
;
467 unreachable("Invalid texture return type");
470 tex
->is_array
= false;
471 tex
->is_shadow
= false;
473 /* Blorp only has one texture and it's bound at unit 0 */
476 tex
->texture_index
= 0;
477 tex
->sampler_index
= 0;
479 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, NULL
);
485 blorp_nir_tex(nir_builder
*b
, nir_ssa_def
*pos
, enum brw_reg_type dst_type
)
488 blorp_create_nir_tex_instr(b
->shader
, nir_texop_tex
, pos
, 2, dst_type
);
490 assert(pos
->num_components
== 2);
491 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
492 tex
->coord_components
= 2;
493 tex
->src
[0].src_type
= nir_tex_src_coord
;
494 tex
->src
[0].src
= nir_src_for_ssa(pos
);
495 tex
->src
[1].src_type
= nir_tex_src_lod
;
496 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(b
, 0));
498 nir_builder_instr_insert(b
, &tex
->instr
);
500 return &tex
->dest
.ssa
;
504 blorp_nir_txf(nir_builder
*b
, struct brw_blorp_blit_vars
*v
,
505 nir_ssa_def
*pos
, enum brw_reg_type dst_type
)
508 blorp_create_nir_tex_instr(b
->shader
, nir_texop_txf
, pos
, 2, dst_type
);
510 /* In order to properly handle 3-D textures, we pull the Z component from
511 * a uniform. TODO: This is a bit magic; we should probably make this
512 * more explicit in the future.
514 assert(pos
->num_components
== 2);
515 pos
= nir_vec3(b
, nir_channel(b
, pos
, 0), nir_channel(b
, pos
, 1),
516 nir_load_var(b
, v
->u_src_z
));
518 tex
->sampler_dim
= GLSL_SAMPLER_DIM_3D
;
519 tex
->coord_components
= 3;
520 tex
->src
[0].src_type
= nir_tex_src_coord
;
521 tex
->src
[0].src
= nir_src_for_ssa(pos
);
522 tex
->src
[1].src_type
= nir_tex_src_lod
;
523 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(b
, 0));
525 nir_builder_instr_insert(b
, &tex
->instr
);
527 return &tex
->dest
.ssa
;
531 blorp_nir_txf_ms(nir_builder
*b
, nir_ssa_def
*pos
, nir_ssa_def
*mcs
,
532 enum brw_reg_type dst_type
)
535 blorp_create_nir_tex_instr(b
->shader
, nir_texop_txf_ms
, pos
,
536 mcs
!= NULL
? 3 : 2, dst_type
);
538 tex
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
539 tex
->coord_components
= 2;
540 tex
->src
[0].src_type
= nir_tex_src_coord
;
541 tex
->src
[0].src
= nir_src_for_ssa(pos
);
543 tex
->src
[1].src_type
= nir_tex_src_ms_index
;
544 if (pos
->num_components
== 2) {
545 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(b
, 0));
547 assert(pos
->num_components
== 3);
548 tex
->src
[1].src
= nir_src_for_ssa(nir_channel(b
, pos
, 2));
552 tex
->src
[2].src_type
= nir_tex_src_ms_mcs
;
553 tex
->src
[2].src
= nir_src_for_ssa(mcs
);
556 nir_builder_instr_insert(b
, &tex
->instr
);
558 return &tex
->dest
.ssa
;
562 blorp_nir_txf_ms_mcs(nir_builder
*b
, nir_ssa_def
*pos
)
565 blorp_create_nir_tex_instr(b
->shader
, nir_texop_txf_ms_mcs
,
566 pos
, 1, BRW_REGISTER_TYPE_D
);
568 tex
->sampler_dim
= GLSL_SAMPLER_DIM_MS
;
569 tex
->coord_components
= 2;
570 tex
->src
[0].src_type
= nir_tex_src_coord
;
571 tex
->src
[0].src
= nir_src_for_ssa(pos
);
573 nir_builder_instr_insert(b
, &tex
->instr
);
575 return &tex
->dest
.ssa
;
579 nir_mask_shift_or(struct nir_builder
*b
, nir_ssa_def
*dst
, nir_ssa_def
*src
,
580 uint32_t src_mask
, int src_left_shift
)
582 nir_ssa_def
*masked
= nir_iand(b
, src
, nir_imm_int(b
, src_mask
));
584 nir_ssa_def
*shifted
;
585 if (src_left_shift
> 0) {
586 shifted
= nir_ishl(b
, masked
, nir_imm_int(b
, src_left_shift
));
587 } else if (src_left_shift
< 0) {
588 shifted
= nir_ushr(b
, masked
, nir_imm_int(b
, -src_left_shift
));
590 assert(src_left_shift
== 0);
594 return nir_ior(b
, dst
, shifted
);
598 * Emit code to compensate for the difference between Y and W tiling.
600 * This code modifies the X and Y coordinates according to the formula:
602 * (X', Y', S') = detile(W-MAJOR, tile(Y-MAJOR, X, Y, S))
604 * (See brw_blorp_build_nir_shader).
606 static inline nir_ssa_def
*
607 blorp_nir_retile_y_to_w(nir_builder
*b
, nir_ssa_def
*pos
)
609 assert(pos
->num_components
== 2);
610 nir_ssa_def
*x_Y
= nir_channel(b
, pos
, 0);
611 nir_ssa_def
*y_Y
= nir_channel(b
, pos
, 1);
613 /* Given X and Y coordinates that describe an address using Y tiling,
614 * translate to the X and Y coordinates that describe the same address
617 * If we break down the low order bits of X and Y, using a
618 * single letter to represent each low-order bit:
620 * X = A << 7 | 0bBCDEFGH
621 * Y = J << 5 | 0bKLMNP (1)
623 * Then we can apply the Y tiling formula to see the memory offset being
626 * offset = (J * tile_pitch + A) << 12 | 0bBCDKLMNPEFGH (2)
628 * If we apply the W detiling formula to this memory location, that the
629 * corresponding X' and Y' coordinates are:
631 * X' = A << 6 | 0bBCDPFH (3)
632 * Y' = J << 6 | 0bKLMNEG
634 * Combining (1) and (3), we see that to transform (X, Y) to (X', Y'),
635 * we need to make the following computation:
637 * X' = (X & ~0b1011) >> 1 | (Y & 0b1) << 2 | X & 0b1 (4)
638 * Y' = (Y & ~0b1) << 1 | (X & 0b1000) >> 2 | (X & 0b10) >> 1
640 nir_ssa_def
*x_W
= nir_imm_int(b
, 0);
641 x_W
= nir_mask_shift_or(b
, x_W
, x_Y
, 0xfffffff4, -1);
642 x_W
= nir_mask_shift_or(b
, x_W
, y_Y
, 0x1, 2);
643 x_W
= nir_mask_shift_or(b
, x_W
, x_Y
, 0x1, 0);
645 nir_ssa_def
*y_W
= nir_imm_int(b
, 0);
646 y_W
= nir_mask_shift_or(b
, y_W
, y_Y
, 0xfffffffe, 1);
647 y_W
= nir_mask_shift_or(b
, y_W
, x_Y
, 0x8, -2);
648 y_W
= nir_mask_shift_or(b
, y_W
, x_Y
, 0x2, -1);
650 return nir_vec2(b
, x_W
, y_W
);
654 * Emit code to compensate for the difference between Y and W tiling.
656 * This code modifies the X and Y coordinates according to the formula:
658 * (X', Y', S') = detile(Y-MAJOR, tile(W-MAJOR, X, Y, S))
660 * (See brw_blorp_build_nir_shader).
662 static inline nir_ssa_def
*
663 blorp_nir_retile_w_to_y(nir_builder
*b
, nir_ssa_def
*pos
)
665 assert(pos
->num_components
== 2);
666 nir_ssa_def
*x_W
= nir_channel(b
, pos
, 0);
667 nir_ssa_def
*y_W
= nir_channel(b
, pos
, 1);
669 /* Applying the same logic as above, but in reverse, we obtain the
672 * X' = (X & ~0b101) << 1 | (Y & 0b10) << 2 | (Y & 0b1) << 1 | X & 0b1
673 * Y' = (Y & ~0b11) >> 1 | (X & 0b100) >> 2
675 nir_ssa_def
*x_Y
= nir_imm_int(b
, 0);
676 x_Y
= nir_mask_shift_or(b
, x_Y
, x_W
, 0xfffffffa, 1);
677 x_Y
= nir_mask_shift_or(b
, x_Y
, y_W
, 0x2, 2);
678 x_Y
= nir_mask_shift_or(b
, x_Y
, y_W
, 0x1, 1);
679 x_Y
= nir_mask_shift_or(b
, x_Y
, x_W
, 0x1, 0);
681 nir_ssa_def
*y_Y
= nir_imm_int(b
, 0);
682 y_Y
= nir_mask_shift_or(b
, y_Y
, y_W
, 0xfffffffc, -1);
683 y_Y
= nir_mask_shift_or(b
, y_Y
, x_W
, 0x4, -2);
685 return nir_vec2(b
, x_Y
, y_Y
);
689 * Emit code to compensate for the difference between MSAA and non-MSAA
692 * This code modifies the X and Y coordinates according to the formula:
694 * (X', Y', S') = encode_msaa(num_samples, IMS, X, Y, S)
696 * (See brw_blorp_blit_program).
698 static inline nir_ssa_def
*
699 blorp_nir_encode_msaa(nir_builder
*b
, nir_ssa_def
*pos
,
700 unsigned num_samples
, enum intel_msaa_layout layout
)
702 assert(pos
->num_components
== 2 || pos
->num_components
== 3);
705 case INTEL_MSAA_LAYOUT_NONE
:
706 assert(pos
->num_components
== 2);
708 case INTEL_MSAA_LAYOUT_CMS
:
709 /* We can't compensate for compressed layout since at this point in the
710 * program we haven't read from the MCS buffer.
712 unreachable("Bad layout in encode_msaa");
713 case INTEL_MSAA_LAYOUT_UMS
:
714 /* No translation needed */
716 case INTEL_MSAA_LAYOUT_IMS
: {
717 nir_ssa_def
*x_in
= nir_channel(b
, pos
, 0);
718 nir_ssa_def
*y_in
= nir_channel(b
, pos
, 1);
719 nir_ssa_def
*s_in
= pos
->num_components
== 2 ? nir_imm_int(b
, 0) :
720 nir_channel(b
, pos
, 2);
722 nir_ssa_def
*x_out
= nir_imm_int(b
, 0);
723 nir_ssa_def
*y_out
= nir_imm_int(b
, 0);
724 switch (num_samples
) {
727 /* encode_msaa(2, IMS, X, Y, S) = (X', Y', 0)
728 * where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
731 * encode_msaa(4, IMS, X, Y, S) = (X', Y', 0)
732 * where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
733 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
735 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0xfffffffe, 1);
736 x_out
= nir_mask_shift_or(b
, x_out
, s_in
, 0x1, 1);
737 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0x1, 0);
738 if (num_samples
== 2) {
741 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0xfffffffe, 1);
742 y_out
= nir_mask_shift_or(b
, y_out
, s_in
, 0x2, 0);
743 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0x1, 0);
748 /* encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
749 * where X' = (X & ~0b1) << 2 | (S & 0b100) | (S & 0b1) << 1
751 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
753 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0xfffffffe, 2);
754 x_out
= nir_mask_shift_or(b
, x_out
, s_in
, 0x4, 0);
755 x_out
= nir_mask_shift_or(b
, x_out
, s_in
, 0x1, 1);
756 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0x1, 0);
757 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0xfffffffe, 1);
758 y_out
= nir_mask_shift_or(b
, y_out
, s_in
, 0x2, 0);
759 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0x1, 0);
763 unreachable("Invalid number of samples for IMS layout");
766 return nir_vec2(b
, x_out
, y_out
);
770 unreachable("Invalid MSAA layout");
775 * Emit code to compensate for the difference between MSAA and non-MSAA
778 * This code modifies the X and Y coordinates according to the formula:
780 * (X', Y', S) = decode_msaa(num_samples, IMS, X, Y, S)
782 * (See brw_blorp_blit_program).
784 static inline nir_ssa_def
*
785 blorp_nir_decode_msaa(nir_builder
*b
, nir_ssa_def
*pos
,
786 unsigned num_samples
, enum intel_msaa_layout layout
)
788 assert(pos
->num_components
== 2 || pos
->num_components
== 3);
791 case INTEL_MSAA_LAYOUT_NONE
:
792 /* No translation necessary, and S should already be zero. */
793 assert(pos
->num_components
== 2);
795 case INTEL_MSAA_LAYOUT_CMS
:
796 /* We can't compensate for compressed layout since at this point in the
797 * program we don't have access to the MCS buffer.
799 unreachable("Bad layout in encode_msaa");
800 case INTEL_MSAA_LAYOUT_UMS
:
801 /* No translation necessary. */
803 case INTEL_MSAA_LAYOUT_IMS
: {
804 assert(pos
->num_components
== 2);
806 nir_ssa_def
*x_in
= nir_channel(b
, pos
, 0);
807 nir_ssa_def
*y_in
= nir_channel(b
, pos
, 1);
809 nir_ssa_def
*x_out
= nir_imm_int(b
, 0);
810 nir_ssa_def
*y_out
= nir_imm_int(b
, 0);
811 nir_ssa_def
*s_out
= nir_imm_int(b
, 0);
812 switch (num_samples
) {
815 /* decode_msaa(2, IMS, X, Y, 0) = (X', Y', S)
816 * where X' = (X & ~0b11) >> 1 | (X & 0b1)
817 * S = (X & 0b10) >> 1
819 * decode_msaa(4, IMS, X, Y, 0) = (X', Y', S)
820 * where X' = (X & ~0b11) >> 1 | (X & 0b1)
821 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
822 * S = (Y & 0b10) | (X & 0b10) >> 1
824 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0xfffffffc, -1);
825 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0x1, 0);
826 if (num_samples
== 2) {
828 s_out
= nir_mask_shift_or(b
, s_out
, x_in
, 0x2, -1);
830 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0xfffffffc, -1);
831 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0x1, 0);
832 s_out
= nir_mask_shift_or(b
, s_out
, x_in
, 0x2, -1);
833 s_out
= nir_mask_shift_or(b
, s_out
, y_in
, 0x2, 0);
838 /* decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
839 * where X' = (X & ~0b111) >> 2 | (X & 0b1)
840 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
841 * S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
843 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0xfffffff8, -2);
844 x_out
= nir_mask_shift_or(b
, x_out
, x_in
, 0x1, 0);
845 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0xfffffffc, -1);
846 y_out
= nir_mask_shift_or(b
, y_out
, y_in
, 0x1, 0);
847 s_out
= nir_mask_shift_or(b
, s_out
, x_in
, 0x4, 0);
848 s_out
= nir_mask_shift_or(b
, s_out
, y_in
, 0x2, 0);
849 s_out
= nir_mask_shift_or(b
, s_out
, x_in
, 0x2, -1);
853 unreachable("Invalid number of samples for IMS layout");
856 return nir_vec3(b
, x_out
, y_out
, s_out
);
860 unreachable("Invalid MSAA layout");
865 * Count the number of trailing 1 bits in the given value. For example:
867 * count_trailing_one_bits(0) == 0
868 * count_trailing_one_bits(7) == 3
869 * count_trailing_one_bits(11) == 2
871 static inline int count_trailing_one_bits(unsigned value
)
873 #ifdef HAVE___BUILTIN_CTZ
874 return __builtin_ctz(~value
);
876 return _mesa_bitcount(value
& ~(value
+ 1));
881 blorp_nir_manual_blend_average(nir_builder
*b
, nir_ssa_def
*pos
,
882 unsigned tex_samples
,
883 enum intel_msaa_layout tex_layout
,
884 enum brw_reg_type dst_type
)
886 /* If non-null, this is the outer-most if statement */
887 nir_if
*outer_if
= NULL
;
889 nir_variable
*color
=
890 nir_local_variable_create(b
->impl
, glsl_vec4_type(), "color");
892 nir_ssa_def
*mcs
= NULL
;
893 if (tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
894 mcs
= blorp_nir_txf_ms_mcs(b
, pos
);
896 /* We add together samples using a binary tree structure, e.g. for 4x MSAA:
898 * result = ((sample[0] + sample[1]) + (sample[2] + sample[3])) / 4
900 * This ensures that when all samples have the same value, no numerical
901 * precision is lost, since each addition operation always adds two equal
902 * values, and summing two equal floating point values does not lose
905 * We perform this computation by treating the texture_data array as a
906 * stack and performing the following operations:
908 * - push sample 0 onto stack
909 * - push sample 1 onto stack
910 * - add top two stack entries
911 * - push sample 2 onto stack
912 * - push sample 3 onto stack
913 * - add top two stack entries
914 * - add top two stack entries
915 * - divide top stack entry by 4
917 * Note that after pushing sample i onto the stack, the number of add
918 * operations we do is equal to the number of trailing 1 bits in i. This
919 * works provided the total number of samples is a power of two, which it
920 * always is for i965.
922 * For integer formats, we replace the add operations with average
923 * operations and skip the final division.
925 nir_ssa_def
*texture_data
[4];
926 unsigned stack_depth
= 0;
927 for (unsigned i
= 0; i
< tex_samples
; ++i
) {
928 assert(stack_depth
== _mesa_bitcount(i
)); /* Loop invariant */
930 /* Push sample i onto the stack */
931 assert(stack_depth
< ARRAY_SIZE(texture_data
));
933 nir_ssa_def
*ms_pos
= nir_vec3(b
, nir_channel(b
, pos
, 0),
934 nir_channel(b
, pos
, 1),
936 texture_data
[stack_depth
++] = blorp_nir_txf_ms(b
, ms_pos
, mcs
, dst_type
);
938 if (i
== 0 && tex_layout
== INTEL_MSAA_LAYOUT_CMS
) {
939 /* The Ivy Bridge PRM, Vol4 Part1 p27 (Multisample Control Surface)
940 * suggests an optimization:
942 * "A simple optimization with probable large return in
943 * performance is to compare the MCS value to zero (indicating
944 * all samples are on sample slice 0), and sample only from
945 * sample slice 0 using ld2dss if MCS is zero."
947 * Note that in the case where the MCS value is zero, sampling from
948 * sample slice 0 using ld2dss and sampling from sample 0 using
949 * ld2dms are equivalent (since all samples are on sample slice 0).
950 * Since we have already sampled from sample 0, all we need to do is
951 * skip the remaining fetches and averaging if MCS is zero.
953 nir_ssa_def
*mcs_zero
=
954 nir_ieq(b
, nir_channel(b
, mcs
, 0), nir_imm_int(b
, 0));
955 nir_if
*if_stmt
= nir_if_create(b
->shader
);
956 if_stmt
->condition
= nir_src_for_ssa(mcs_zero
);
957 nir_cf_node_insert(b
->cursor
, &if_stmt
->cf_node
);
959 b
->cursor
= nir_after_cf_list(&if_stmt
->then_list
);
960 nir_store_var(b
, color
, texture_data
[0], 0xf);
962 b
->cursor
= nir_after_cf_list(&if_stmt
->else_list
);
966 for (int j
= 0; j
< count_trailing_one_bits(i
); j
++) {
967 assert(stack_depth
>= 2);
970 assert(dst_type
== BRW_REGISTER_TYPE_F
);
971 texture_data
[stack_depth
- 1] =
972 nir_fadd(b
, texture_data
[stack_depth
- 1],
973 texture_data
[stack_depth
]);
977 /* We should have just 1 sample on the stack now. */
978 assert(stack_depth
== 1);
980 texture_data
[0] = nir_fmul(b
, texture_data
[0],
981 nir_imm_float(b
, 1.0 / tex_samples
));
983 nir_store_var(b
, color
, texture_data
[0], 0xf);
986 b
->cursor
= nir_after_cf_node(&outer_if
->cf_node
);
988 return nir_load_var(b
, color
);
991 static inline nir_ssa_def
*
992 nir_imm_vec2(nir_builder
*build
, float x
, float y
)
996 memset(&v
, 0, sizeof(v
));
1000 return nir_build_imm(build
, 4, 32, v
);
/* Manually blend samples for a scaled multisample blit with bilinear
 * filtering: scale the incoming position into the sample grid, clamp it to
 * the rectangular grid bounds (u_rect_grid_x1/y1), fetch the four nearest
 * samples with txf_ms (remapping sample index to sample number via
 * 0x17306425 when tex_samples == 8), and lerp them with the fractional
 * parts of the coordinate.
 *
 * NOTE(review): this extraction is garbled — statements are hard-wrapped
 * and the original file's line numbers are fused into the text; some
 * lines (braces, comment terminators) were dropped. Code text below is
 * preserved byte-for-byte.
 */
1003 static nir_ssa_def
*
1004 blorp_nir_manual_blend_bilinear(nir_builder
*b
, nir_ssa_def
*pos
,
1005 unsigned tex_samples
,
1006 const brw_blorp_blit_prog_key
*key
,
1007 struct brw_blorp_blit_vars
*v
)
1009 nir_ssa_def
*pos_xy
= nir_channels(b
, pos
, 0x3);
1011 nir_ssa_def
*scale
= nir_imm_vec2(b
, key
->x_scale
, key
->y_scale
);
1013 /* Translate coordinates to lay out the samples in a rectangular grid
1014 * roughly corresponding to sample locations.
1016 pos_xy
= nir_fmul(b
, pos_xy
, scale
);
1017 /* Adjust coordinates so that integers represent pixel centers rather
1020 pos_xy
= nir_fadd(b
, pos_xy
, nir_imm_float(b
, -0.5));
1021 /* Clamp the X, Y texture coordinates to properly handle the sampling of
1022 * texels on texture edges.
1024 pos_xy
= nir_fmin(b
, nir_fmax(b
, pos_xy
, nir_imm_float(b
, 0.0)),
1025 nir_vec2(b
, nir_load_var(b
, v
->u_rect_grid_x1
),
1026 nir_load_var(b
, v
->u_rect_grid_y1
)));
1028 /* Store the fractional parts to be used as bilinear interpolation
1031 nir_ssa_def
*frac_xy
= nir_ffract(b
, pos_xy
);
1032 /* Round the float coordinates down to nearest integer */
1033 pos_xy
= nir_fdiv(b
, nir_ftrunc(b
, pos_xy
), scale
);
/* Fetch the four samples surrounding the coordinate; the per-iteration
 * offsets come from bit 0 (X) and bit 1 (Y) of the loop index.
 */
1035 nir_ssa_def
*tex_data
[4];
1036 for (unsigned i
= 0; i
< 4; ++i
) {
1037 float sample_off_x
= (float)(i
& 0x1) / key
->x_scale
;
1038 float sample_off_y
= (float)((i
>> 1) & 0x1) / key
->y_scale
;
1039 nir_ssa_def
*sample_off
= nir_imm_vec2(b
, sample_off_x
, sample_off_y
);
1041 nir_ssa_def
*sample_coords
= nir_fadd(b
, pos_xy
, sample_off
);
1042 nir_ssa_def
*sample_coords_int
= nir_f2i(b
, sample_coords
);
1044 /* The MCS value we fetch has to match up with the pixel that we're
1045 * sampling from. Since we sample from different pixels in each
1046 * iteration of this "for" loop, the call to mcs_fetch() should be
1047 * here inside the loop after computing the pixel coordinates.
1049 nir_ssa_def
*mcs
= NULL
;
1050 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
1051 mcs
= blorp_nir_txf_ms_mcs(b
, sample_coords_int
);
1053 /* Compute sample index and map the sample index to a sample number.
1054 * Sample index layout shows the numbering of slots in a rectangular
1055 * grid of samples with in a pixel. Sample number layout shows the
1056 * rectangular grid of samples roughly corresponding to the real sample
1057 * locations with in a pixel.
1058 * In case of 4x MSAA, layout of sample indices matches the layout of
1066 * In case of 8x MSAA the two layouts don't match.
1067 * sample index layout : --------- sample number layout : ---------
1068 * | 0 | 1 | | 5 | 2 |
1069 * --------- ---------
1070 * | 2 | 3 | | 4 | 6 |
1071 * --------- ---------
1072 * | 4 | 5 | | 0 | 3 |
1073 * --------- ---------
1074 * | 6 | 7 | | 7 | 1 |
1075 * --------- ---------
1077 * Fortunately, this can be done fairly easily as:
1078 * S' = (0x17306425 >> (S * 4)) & 0xf
1080 nir_ssa_def
*frac
= nir_ffract(b
, sample_coords
);
1081 nir_ssa_def
*sample
=
1082 nir_fdot2(b
, frac
, nir_imm_vec2(b
, key
->x_scale
,
1083 key
->x_scale
* key
->y_scale
));
1084 sample
= nir_f2i(b
, sample
);
1086 if (tex_samples
== 8) {
1087 sample
= nir_iand(b
, nir_ishr(b
, nir_imm_int(b
, 0x17306425),
1088 nir_ishl(b
, sample
, nir_imm_int(b
, 2))),
1089 nir_imm_int(b
, 0xf));
1091 nir_ssa_def
*pos_ms
= nir_vec3(b
, nir_channel(b
, sample_coords_int
, 0),
1092 nir_channel(b
, sample_coords_int
, 1),
1094 tex_data
[i
] = blorp_nir_txf_ms(b
, pos_ms
, mcs
, key
->texture_data_type
);
/* Bilinear interpolation: lerp horizontally within each row, then
 * vertically between the two rows (the frac_y lerp argument was dropped
 * by the extraction after line 1100).
 */
1097 nir_ssa_def
*frac_x
= nir_channel(b
, frac_xy
, 0);
1098 nir_ssa_def
*frac_y
= nir_channel(b
, frac_xy
, 1);
1099 return nir_flrp(b
, nir_flrp(b
, tex_data
[0], tex_data
[1], frac_x
),
1100 nir_flrp(b
, tex_data
[2], tex_data
[3], frac_x
),
1105 * Generator for WM programs used in BLORP blits.
1107 * The bulk of the work done by the WM program is to wrap and unwrap the
1108 * coordinate transformations used by the hardware to store surfaces in
1109 * memory. The hardware transforms a pixel location (X, Y, S) (where S is the
1110 * sample index for a multisampled surface) to a memory offset by the
1111 * following formulas:
1113 * offset = tile(tiling_format, encode_msaa(num_samples, layout, X, Y, S))
1114 * (X, Y, S) = decode_msaa(num_samples, layout, detile(tiling_format, offset))
1116 * For a single-sampled surface, or for a multisampled surface using
1117 * INTEL_MSAA_LAYOUT_UMS, encode_msaa() and decode_msaa are the identity
1120 * encode_msaa(1, NONE, X, Y, 0) = (X, Y, 0)
1121 * decode_msaa(1, NONE, X, Y, 0) = (X, Y, 0)
1122 * encode_msaa(n, UMS, X, Y, S) = (X, Y, S)
1123 * decode_msaa(n, UMS, X, Y, S) = (X, Y, S)
1125 * For a 4x multisampled surface using INTEL_MSAA_LAYOUT_IMS, encode_msaa()
1126 * embeds the sample number into bit 1 of the X and Y coordinates:
1128 * encode_msaa(4, IMS, X, Y, S) = (X', Y', 0)
1129 * where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
1130 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
1131 * decode_msaa(4, IMS, X, Y, 0) = (X', Y', S)
1132 * where X' = (X & ~0b11) >> 1 | (X & 0b1)
1133 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
1134 * S = (Y & 0b10) | (X & 0b10) >> 1
1136 * For an 8x multisampled surface using INTEL_MSAA_LAYOUT_IMS, encode_msaa()
1137 * embeds the sample number into bits 1 and 2 of the X coordinate and bit 1 of
1140 * encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
1141 * where X' = (X & ~0b1) << 2 | (S & 0b100) | (S & 0b1) << 1 | (X & 0b1)
1142 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
1143 * decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
1144 * where X' = (X & ~0b111) >> 2 | (X & 0b1)
1145 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
1146 * S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
1148 * For X tiling, tile() combines together the low-order bits of the X and Y
1149 * coordinates in the pattern 0byyyxxxxxxxxx, creating 4k tiles that are 512
1150 * bytes wide and 8 rows high:
1152 * tile(x_tiled, X, Y, S) = A
1153 * where A = tile_num << 12 | offset
1154 * tile_num = (Y' >> 3) * tile_pitch + (X' >> 9)
1155 * offset = (Y' & 0b111) << 9
1156 * | (X & 0b111111111)
1158 * Y' = Y + S * qpitch
1159 * detile(x_tiled, A) = (X, Y, S)
1160 * where X = X' / cpp
1163 * Y' = (tile_num / tile_pitch) << 3
1164 * | (A & 0b111000000000) >> 9
1165 * X' = (tile_num % tile_pitch) << 9
1166 * | (A & 0b111111111)
1168 * (In all tiling formulas, cpp is the number of bytes occupied by a single
1169 * sample ("chars per pixel"), tile_pitch is the number of 4k tiles required
1170 * to fill the width of the surface, and qpitch is the spacing (in rows)
1171 * between array slices).
1173 * For Y tiling, tile() combines together the low-order bits of the X and Y
1174 * coordinates in the pattern 0bxxxyyyyyxxxx, creating 4k tiles that are 128
1175 * bytes wide and 32 rows high:
1177 * tile(y_tiled, X, Y, S) = A
1178 * where A = tile_num << 12 | offset
1179 * tile_num = (Y' >> 5) * tile_pitch + (X' >> 7)
1180 * offset = (X' & 0b1110000) << 5
1181 * | (Y' & 0b11111) << 4
1184 * Y' = Y + S * qpitch
1185 * detile(y_tiled, A) = (X, Y, S)
1186 * where X = X' / cpp
1189 * Y' = (tile_num / tile_pitch) << 5
1190 * | (A & 0b111110000) >> 4
1191 * X' = (tile_num % tile_pitch) << 7
1192 * | (A & 0b111000000000) >> 5
1195 * For W tiling, tile() combines together the low-order bits of the X and Y
1196 * coordinates in the pattern 0bxxxyyyyxyxyx, creating 4k tiles that are 64
1197 * bytes wide and 64 rows high (note that W tiling is only used for stencil
1198 * buffers, which always have cpp = 1 and S=0):
1200 * tile(w_tiled, X, Y, S) = A
1201 * where A = tile_num << 12 | offset
1202 * tile_num = (Y' >> 6) * tile_pitch + (X' >> 6)
1203 * offset = (X' & 0b111000) << 6
1204 * | (Y' & 0b111100) << 3
1205 * | (X' & 0b100) << 2
1206 * | (Y' & 0b10) << 2
1207 * | (X' & 0b10) << 1
1211 * Y' = Y + S * qpitch
1212 * detile(w_tiled, A) = (X, Y, S)
1213 * where X = X' / cpp = X'
1214 * Y = Y' % qpitch = Y'
1215 * S = Y / qpitch = 0
1216 * Y' = (tile_num / tile_pitch) << 6
1217 * | (A & 0b111100000) >> 3
1218 * | (A & 0b1000) >> 2
1220 * X' = (tile_num % tile_pitch) << 6
1221 * | (A & 0b111000000000) >> 6
1222 * | (A & 0b10000) >> 2
1223 * | (A & 0b100) >> 1
1226 * Finally, for a non-tiled surface, tile() simply combines together the X and
1227 * Y coordinates in the natural way:
1229 * tile(untiled, X, Y, S) = A
1230 * where A = Y * pitch + X'
1232 * Y' = Y + S * qpitch
1233 * detile(untiled, A) = (X, Y, S)
1234 * where X = X' / cpp
1240 * (In these formulas, pitch is the number of bytes occupied by a single row
/* Build the NIR fragment shader for a blorp blit.
 *
 * Pipeline of the emitted shader: fetch fragment coordinates, convert the
 * destination coordinates when the render target's tiling/sample-count/
 * layout differ from the destination surface's (encode_msaa ->
 * retile_y_to_w -> decode_msaa), discard fragments outside the dest rect,
 * apply the dst->src coordinate transform, then either blend samples
 * (average or bilinear) or fetch a single texel (tex/txf/txf_ms, with an
 * MCS fetch for CMS layout), and finally store the result to color_out.
 *
 * NOTE(review): extraction is garbled — statements hard-wrapped, original
 * line numbers fused in, and some lines (function return type, braces,
 * trailing arguments, the final return) dropped. Code text is preserved
 * byte-for-byte below.
 */
1244 brw_blorp_build_nir_shader(struct brw_context
*brw
,
1245 const brw_blorp_blit_prog_key
*key
,
1246 struct brw_blorp_prog_data
*prog_data
)
1248 nir_ssa_def
*src_pos
, *dst_pos
, *color
;
1251 if (key
->dst_tiled_w
&& key
->rt_samples
> 0) {
1252 /* If the destination image is W tiled and multisampled, then the thread
1253 * must be dispatched once per sample, not once per pixel. This is
1254 * necessary because after conversion between W and Y tiling, there's no
1255 * guarantee that all samples corresponding to a single pixel will still
1258 assert(key
->persample_msaa_dispatch
);
1262 /* We are blending, which means we won't have an opportunity to
1263 * translate the tiling and sample count for the texture surface. So
1264 * the surface state for the texture must be configured with the correct
1265 * tiling and sample count.
1267 assert(!key
->src_tiled_w
);
1268 assert(key
->tex_samples
== key
->src_samples
);
1269 assert(key
->tex_layout
== key
->src_layout
);
1270 assert(key
->tex_samples
> 0);
1273 if (key
->persample_msaa_dispatch
) {
1274 /* It only makes sense to do persample dispatch if the render target is
1275 * configured as multisampled.
1277 assert(key
->rt_samples
> 0);
1280 /* Make sure layout is consistent with sample count */
1281 assert((key
->tex_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1282 (key
->tex_samples
== 0));
1283 assert((key
->rt_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1284 (key
->rt_samples
== 0));
1285 assert((key
->src_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1286 (key
->src_samples
== 0));
1287 assert((key
->dst_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1288 (key
->dst_samples
== 0));
1290 /* Set up prog_data */
1291 brw_blorp_prog_data_init(prog_data
);
/* Begin building the shader with a fresh nir_builder. */
1294 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
1296 struct brw_blorp_blit_vars v
;
1297 brw_blorp_blit_vars_init(&b
, &v
, key
);
1299 dst_pos
= blorp_blit_get_frag_coords(&b
, key
, &v
);
1301 /* Render target and texture hardware don't support W tiling until Gen8. */
1302 const bool rt_tiled_w
= false;
1303 const bool tex_tiled_w
= brw
->gen
>= 8 && key
->src_tiled_w
;
1305 /* The address that data will be written to is determined by the
1306 * coordinates supplied to the WM thread and the tiling and sample count of
1307 * the render target, according to the formula:
1309 * (X, Y, S) = decode_msaa(rt_samples, detile(rt_tiling, offset))
1311 * If the actual tiling and sample count of the destination surface are not
1312 * the same as the configuration of the render target, then these
1313 * coordinates are wrong and we have to adjust them to compensate for the
1316 if (rt_tiled_w
!= key
->dst_tiled_w
||
1317 key
->rt_samples
!= key
->dst_samples
||
1318 key
->rt_layout
!= key
->dst_layout
) {
1319 dst_pos
= blorp_nir_encode_msaa(&b
, dst_pos
, key
->rt_samples
,
1321 /* Now (X, Y, S) = detile(rt_tiling, offset) */
1322 if (rt_tiled_w
!= key
->dst_tiled_w
)
1323 dst_pos
= blorp_nir_retile_y_to_w(&b
, dst_pos
);
1324 /* Now (X, Y, S) = detile(rt_tiling, offset) */
1325 dst_pos
= blorp_nir_decode_msaa(&b
, dst_pos
, key
->dst_samples
,
1329 /* Now (X, Y, S) = decode_msaa(dst_samples, detile(dst_tiling, offset)).
1331 * That is: X, Y and S now contain the true coordinates and sample index of
1332 * the data that the WM thread should output.
1334 * If we need to kill pixels that are outside the destination rectangle,
1335 * now is the time to do it.
1338 blorp_nir_discard_if_outside_rect(&b
, dst_pos
, &v
);
1340 src_pos
= blorp_blit_apply_transform(&b
, nir_i2f(&b
, dst_pos
), &v
);
1342 /* If the source image is not multisampled, then we want to fetch sample
1343 * number 0, because that's the only sample there is.
1345 if (key
->src_samples
== 0)
1346 src_pos
= nir_channels(&b
, src_pos
, 0x3);
1348 /* X, Y, and S are now the coordinates of the pixel in the source image
1349 * that we want to texture from. Exception: if we are blending, then S is
1350 * irrelevant, because we are going to fetch all samples.
1352 if (key
->blend
&& !key
->blit_scaled
) {
1353 /* Resolves (effectively) use texelFetch, so we need integers */
1354 src_pos
= nir_f2i(&b
, src_pos
);
1356 if (brw
->gen
== 6) {
1357 /* Because gen6 only supports 4x interleaved MSAA, we can do all the
1358 * blending we need with a single linear-interpolated texture lookup
1359 * at the center of the sample. The texture coordinates to be odd
1360 * integers so that they correspond to the center of a 2x2 block
1361 * representing the four samples that make up a pixel. So we need
1362 * to multiply our X and Y coordinates each by 2 and then add 1.
1364 src_pos
= nir_ishl(&b
, src_pos
, nir_imm_int(&b
, 1));
1365 src_pos
= nir_iadd(&b
, src_pos
, nir_imm_int(&b
, 1));
1366 src_pos
= nir_i2f(&b
, nir_channels(&b
, src_pos
, 0x3));
1367 color
= blorp_nir_tex(&b
, src_pos
, key
->texture_data_type
);
1369 /* Gen7+ hardware doesn't automatically blend. */
1370 color
= blorp_nir_manual_blend_average(&b
, src_pos
, key
->src_samples
,
1372 key
->texture_data_type
);
1374 } else if (key
->blend
&& key
->blit_scaled
) {
1375 color
= blorp_nir_manual_blend_bilinear(&b
, src_pos
, key
->src_samples
, key
, &v
);
1377 if (key
->bilinear_filter
) {
1378 color
= blorp_nir_tex(&b
, src_pos
, key
->texture_data_type
);
1380 /* We're going to use texelFetch, so we need integers */
1381 src_pos
= nir_f2i(&b
, src_pos
);
1383 /* We aren't blending, which means we just want to fetch a single
1384 * sample from the source surface. The address that we want to fetch
1385 * from is related to the X, Y and S values according to the formula:
1387 * (X, Y, S) = decode_msaa(src_samples, detile(src_tiling, offset)).
1389 * If the actual tiling and sample count of the source surface are
1390 * not the same as the configuration of the texture, then we need to
1391 * adjust the coordinates to compensate for the difference.
1393 if (tex_tiled_w
!= key
->src_tiled_w
||
1394 key
->tex_samples
!= key
->src_samples
||
1395 key
->tex_layout
!= key
->src_layout
) {
1396 src_pos
= blorp_nir_encode_msaa(&b
, src_pos
, key
->src_samples
,
1398 /* Now (X, Y, S) = detile(src_tiling, offset) */
1399 if (tex_tiled_w
!= key
->src_tiled_w
)
1400 src_pos
= blorp_nir_retile_w_to_y(&b
, src_pos
);
1401 /* Now (X, Y, S) = detile(tex_tiling, offset) */
1402 src_pos
= blorp_nir_decode_msaa(&b
, src_pos
, key
->tex_samples
,
1406 /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)).
1408 * In other words: X, Y, and S now contain values which, when passed to
1409 * the texturing unit, will cause data to be read from the correct
1410 * memory location. So we can fetch the texel now.
1412 if (key
->src_samples
== 0) {
1413 color
= blorp_nir_txf(&b
, &v
, src_pos
, key
->texture_data_type
);
1415 nir_ssa_def
*mcs
= NULL
;
1416 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
1417 mcs
= blorp_nir_txf_ms_mcs(&b
, src_pos
);
1419 color
= blorp_nir_txf_ms(&b
, src_pos
, mcs
, key
->texture_data_type
);
/* Write the final color to the shader output (all four channels). */
1424 nir_store_var(&b
, v
.color_out
, color
, 0xf);
/* Compiler for blorp blit WM programs on the hand-rolled EU emitter path
 * (pre-NIR). compile() drives the same pipeline as the NIR builder above:
 * frag coords -> tiling/MSAA translation -> rect kill -> dst->src
 * transform -> sample fetch/blend -> render target write.
 *
 * NOTE(review): the extraction dropped several lines from this class
 * declaration (access specifiers, some member declarations such as R0/R1,
 * xy_coord_index, s_is_zero, base_mrf, and closing braces). Declarations
 * below are preserved byte-for-byte.
 */
1429 class brw_blorp_blit_program
: public brw_blorp_eu_emitter
1432 brw_blorp_blit_program(struct brw_context
*brw
,
1433 const brw_blorp_blit_prog_key
*key
);
1435 const GLuint
*compile(struct brw_context
*brw
, bool debug_flag
,
1436 GLuint
*program_size
);
1438 brw_blorp_prog_data prog_data
;
1442 void alloc_push_const_regs(int base_reg
);
1443 void compute_frag_coords();
1444 void translate_tiling(bool old_tiled_w
, bool new_tiled_w
);
1445 void encode_msaa(unsigned num_samples
, intel_msaa_layout layout
);
1446 void decode_msaa(unsigned num_samples
, intel_msaa_layout layout
);
1447 void translate_dst_to_src();
1448 void clamp_tex_coords(struct brw_reg regX
, struct brw_reg regY
,
1449 struct brw_reg clampX0
, struct brw_reg clampY0
,
1450 struct brw_reg clampX1
, struct brw_reg clampY1
);
1451 void single_to_blend();
1452 void manual_blend_average(unsigned num_samples
);
1453 void manual_blend_bilinear(unsigned num_samples
);
1454 void sample(struct brw_reg dst
);
1455 void texel_fetch(struct brw_reg dst
);
1457 void texture_lookup(struct brw_reg dst
, enum opcode op
,
1458 const sampler_message_arg
*args
, int num_args
);
1459 void render_target_write();
1462 * Base-2 logarithm of the maximum number of samples that can be blended.
1464 static const unsigned LOG2_MAX_BLEND_SAMPLES
= 3;
1466 struct brw_context
*brw
;
1467 const brw_blorp_blit_prog_key
*key
;
1469 /* Thread dispatch header */
1472 /* Pixel X/Y coordinates (always in R1). */
1475 /* Push constants */
1476 struct brw_reg dst_x0
;
1477 struct brw_reg dst_x1
;
1478 struct brw_reg dst_y0
;
1479 struct brw_reg dst_y1
;
1480 /* Top right coordinates of the rectangular grid used for scaled blitting */
1481 struct brw_reg rect_grid_x1
;
1482 struct brw_reg rect_grid_y1
;
1484 struct brw_reg multiplier
;
1485 struct brw_reg offset
;
1486 } x_transform
, y_transform
;
1487 struct brw_reg src_z
;
1489 /* Data read from texture (4 vec16's per array element) */
1490 struct brw_reg texture_data
[LOG2_MAX_BLEND_SAMPLES
+ 1];
1492 /* Auxiliary storage for the contents of the MCS surface.
1494 * Since the sampler always returns 8 registers worth of data, this is 8
1495 * registers wide, even though we only use the first 2 registers of it.
1497 struct brw_reg mcs_data
;
1499 /* X coordinates. We have two of them so that we can perform coordinate
1500 * transformations easily.
1502 struct brw_reg x_coords
[2];
1504 /* Y coordinates. We have two of them so that we can perform coordinate
1505 * transformations easily.
1507 struct brw_reg y_coords
[2];
1509 /* X, Y coordinates of the pixel from which we need to fetch the specific
1510 * sample. These are used for multisample scaled blitting.
1512 struct brw_reg x_sample_coords
;
1513 struct brw_reg y_sample_coords
;
1515 /* Fractional parts of the x and y coordinates, used as bilinear interpolation coefficients */
1516 struct brw_reg x_frac
;
1517 struct brw_reg y_frac
;
1519 /* Which element of x_coords and y_coords is currently in use.
1523 /* True if, at the point in the program currently being compiled, the
1524 * sample index is known to be zero.
1528 /* Register storing the sample index when s_is_zero is false. */
1529 struct brw_reg sample_index
;
1535 /* MRF used for sampling and render target writes */
/* Constructor: stores the context and program key; all real work happens
 * in compile(). NOTE(review): the (presumably empty) body was dropped by
 * the extraction.
 */
1539 brw_blorp_blit_program::brw_blorp_blit_program(
1540 struct brw_context
*brw
, const brw_blorp_blit_prog_key
*key
)
1541 : brw_blorp_eu_emitter(), brw(brw
), key(key
)
/* Emit the full blit program and return the compiled machine code
 * (via get_program). Mirrors brw_blorp_build_nir_shader: validate the
 * key, set up prog_data, compute fragment coordinates, translate
 * tiling/MSAA for the destination, kill out-of-rect pixels, translate to
 * source coordinates, fetch or blend samples, and write the render target.
 *
 * NOTE(review): extraction dropped the return type line, braces, and some
 * statements (e.g. alloc_regs(), the src sample-0 assignment, the CMS
 * mcs_fetch call). Code text is preserved byte-for-byte below.
 */
1546 brw_blorp_blit_program::compile(struct brw_context
*brw
, bool debug_flag
,
1547 GLuint
*program_size
)
1550 if (key
->dst_tiled_w
&& key
->rt_samples
> 0) {
1551 /* If the destination image is W tiled and multisampled, then the thread
1552 * must be dispatched once per sample, not once per pixel. This is
1553 * necessary because after conversion between W and Y tiling, there's no
1554 * guarantee that all samples corresponding to a single pixel will still
1557 assert(key
->persample_msaa_dispatch
);
1561 /* We are blending, which means we won't have an opportunity to
1562 * translate the tiling and sample count for the texture surface. So
1563 * the surface state for the texture must be configured with the correct
1564 * tiling and sample count.
1566 assert(!key
->src_tiled_w
);
1567 assert(key
->tex_samples
== key
->src_samples
);
1568 assert(key
->tex_layout
== key
->src_layout
);
1569 assert(key
->tex_samples
> 0);
1572 if (key
->persample_msaa_dispatch
) {
1573 /* It only makes sense to do persample dispatch if the render target is
1574 * configured as multisampled.
1576 assert(key
->rt_samples
> 0);
1579 /* Make sure layout is consistent with sample count */
1580 assert((key
->tex_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1581 (key
->tex_samples
== 0));
1582 assert((key
->rt_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1583 (key
->rt_samples
== 0));
1584 assert((key
->src_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1585 (key
->src_samples
== 0));
1586 assert((key
->dst_layout
== INTEL_MSAA_LAYOUT_NONE
) ==
1587 (key
->dst_samples
== 0));
1589 /* Set up prog_data */
1590 brw_blorp_prog_data_init(&prog_data
);
1591 prog_data
.persample_msaa_dispatch
= key
->persample_msaa_dispatch
;
1594 compute_frag_coords();
1596 /* Render target and texture hardware don't support W tiling until Gen8. */
1597 const bool rt_tiled_w
= false;
1598 const bool tex_tiled_w
= brw
->gen
>= 8 && key
->src_tiled_w
;
1600 /* The address that data will be written to is determined by the
1601 * coordinates supplied to the WM thread and the tiling and sample count of
1602 * the render target, according to the formula:
1604 * (X, Y, S) = decode_msaa(rt_samples, detile(rt_tiling, offset))
1606 * If the actual tiling and sample count of the destination surface are not
1607 * the same as the configuration of the render target, then these
1608 * coordinates are wrong and we have to adjust them to compensate for the
1611 if (rt_tiled_w
!= key
->dst_tiled_w
||
1612 key
->rt_samples
!= key
->dst_samples
||
1613 key
->rt_layout
!= key
->dst_layout
) {
1614 encode_msaa(key
->rt_samples
, key
->rt_layout
);
1615 /* Now (X, Y, S) = detile(rt_tiling, offset) */
1616 translate_tiling(rt_tiled_w
, key
->dst_tiled_w
);
1617 /* Now (X, Y, S) = detile(dst_tiling, offset) */
1618 decode_msaa(key
->dst_samples
, key
->dst_layout
);
1621 /* Now (X, Y, S) = decode_msaa(dst_samples, detile(dst_tiling, offset)).
1623 * That is: X, Y and S now contain the true coordinates and sample index of
1624 * the data that the WM thread should output.
1626 * If we need to kill pixels that are outside the destination rectangle,
1627 * now is the time to do it.
1631 emit_kill_if_outside_rect(x_coords
[xy_coord_index
],
1632 y_coords
[xy_coord_index
],
1633 dst_x0
, dst_x1
, dst_y0
, dst_y1
);
1635 /* Next, apply a translation to obtain coordinates in the source image. */
1636 translate_dst_to_src();
1638 /* If the source image is not multisampled, then we want to fetch sample
1639 * number 0, because that's the only sample there is.
1641 if (key
->src_samples
== 0)
1644 /* X, Y, and S are now the coordinates of the pixel in the source image
1645 * that we want to texture from. Exception: if we are blending, then S is
1646 * irrelevant, because we are going to fetch all samples.
1648 if (key
->blend
&& !key
->blit_scaled
) {
1649 if (brw
->gen
== 6) {
1650 /* Gen6 hardware can automatically blend using the SAMPLE message */
1652 sample(texture_data
[0]);
1654 /* Gen7+ hardware doesn't automatically blend. */
1655 manual_blend_average(key
->src_samples
);
1657 } else if(key
->blend
&& key
->blit_scaled
) {
1658 manual_blend_bilinear(key
->src_samples
);
1660 /* We aren't blending, which means we just want to fetch a single sample
1661 * from the source surface. The address that we want to fetch from is
1662 * related to the X, Y and S values according to the formula:
1664 * (X, Y, S) = decode_msaa(src_samples, detile(src_tiling, offset)).
1666 * If the actual tiling and sample count of the source surface are not
1667 * the same as the configuration of the texture, then we need to adjust
1668 * the coordinates to compensate for the difference.
1670 if ((tex_tiled_w
!= key
->src_tiled_w
||
1671 key
->tex_samples
!= key
->src_samples
||
1672 key
->tex_layout
!= key
->src_layout
) &&
1673 !key
->bilinear_filter
) {
1674 encode_msaa(key
->src_samples
, key
->src_layout
);
1675 /* Now (X, Y, S) = detile(src_tiling, offset) */
1676 translate_tiling(key
->src_tiled_w
, tex_tiled_w
);
1677 /* Now (X, Y, S) = detile(tex_tiling, offset) */
1678 decode_msaa(key
->tex_samples
, key
->tex_layout
);
1681 if (key
->bilinear_filter
) {
1682 sample(texture_data
[0]);
1685 /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)).
1687 * In other words: X, Y, and S now contain values which, when passed to
1688 * the texturing unit, will cause data to be read from the correct
1689 * memory location. So we can fetch the texel now.
1691 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
1693 texel_fetch(texture_data
[0]);
1697 /* Finally, write the fetched (or blended) value to the render target and
1698 * terminate the thread.
1700 render_target_write();
1702 return get_program(brw
, debug_flag
, program_size
);
/* Map each push-constant member register onto its location in the CURBE:
 * CONST_LOC computes the byte offset of a field within
 * brw_blorp_wm_push_constants, and ALLOC_REG converts that offset into a
 * vec1 GRF region (32-byte registers, 4-byte elements) relative to
 * base_reg. NOTE(review): the "this->name =" left-hand sides inside
 * ALLOC_REG appear to have been dropped by the extraction; code text is
 * preserved byte-for-byte.
 */
1706 brw_blorp_blit_program::alloc_push_const_regs(int base_reg
)
1708 #define CONST_LOC(name) offsetof(brw_blorp_wm_push_constants, name)
1709 #define ALLOC_REG(name, type) \
1711 retype(brw_vec1_reg(BRW_GENERAL_REGISTER_FILE, \
1712 base_reg + CONST_LOC(name) / 32, \
1713 (CONST_LOC(name) % 32) / 4), type)
1715 ALLOC_REG(dst_x0
, BRW_REGISTER_TYPE_UD
);
1716 ALLOC_REG(dst_x1
, BRW_REGISTER_TYPE_UD
);
1717 ALLOC_REG(dst_y0
, BRW_REGISTER_TYPE_UD
);
1718 ALLOC_REG(dst_y1
, BRW_REGISTER_TYPE_UD
);
1719 ALLOC_REG(rect_grid_x1
, BRW_REGISTER_TYPE_F
);
1720 ALLOC_REG(rect_grid_y1
, BRW_REGISTER_TYPE_F
);
1721 ALLOC_REG(x_transform
.multiplier
, BRW_REGISTER_TYPE_F
);
1722 ALLOC_REG(x_transform
.offset
, BRW_REGISTER_TYPE_F
);
1723 ALLOC_REG(y_transform
.multiplier
, BRW_REGISTER_TYPE_F
);
1724 ALLOC_REG(y_transform
.offset
, BRW_REGISTER_TYPE_F
);
1725 ALLOC_REG(src_z
, BRW_REGISTER_TYPE_UD
);
/* Statically assign GRF registers: thread header (R0), pixel coordinates
 * (R1), push constants, texture/MCS data, the two coordinate double
 * buffers, the scaled-blit sample coordinate and fraction registers, and
 * the t1/t2 temporaries; asserts the allocation fits below
 * GEN7_MRF_HACK_START, then sets base_mrf.
 *
 * NOTE(review): extraction dropped several lines here (the "reg += 8"
 * style increments, the mcs_data/x_coords/y_coords left-hand sides, and
 * the sample_index assignment). Code text preserved byte-for-byte.
 */
1731 brw_blorp_blit_program::alloc_regs()
1734 this->R0
= retype(brw_vec8_grf(reg
++, 0), BRW_REGISTER_TYPE_UW
);
1735 this->R1
= retype(brw_vec8_grf(reg
++, 0), BRW_REGISTER_TYPE_UW
);
1736 prog_data
.first_curbe_grf_0
= reg
;
1737 alloc_push_const_regs(reg
);
1738 reg
+= BRW_BLORP_NUM_PUSH_CONST_REGS
;
1739 for (unsigned i
= 0; i
< ARRAY_SIZE(texture_data
); ++i
) {
1740 this->texture_data
[i
] =
1741 retype(vec16(brw_vec8_grf(reg
, 0)), key
->texture_data_type
);
1745 retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
); reg
+= 8;
1747 for (int i
= 0; i
< 2; ++i
) {
1749 = retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
);
1752 = retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
);
1756 if (key
->blit_scaled
&& key
->blend
) {
1757 this->x_sample_coords
= brw_vec8_grf(reg
, 0);
1759 this->y_sample_coords
= brw_vec8_grf(reg
, 0);
1761 this->x_frac
= brw_vec8_grf(reg
, 0);
1763 this->y_frac
= brw_vec8_grf(reg
, 0);
1767 this->xy_coord_index
= 0;
1769 = retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
);
1771 this->t1
= retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
);
1773 this->t2
= retype(brw_vec8_grf(reg
, 0), BRW_REGISTER_TYPE_UD
);
1776 /* Make sure we didn't run out of registers */
1777 assert(reg
<= GEN7_MRF_HACK_START
);
1780 this->base_mrf
= mrf
;
1783 /* In the code that follows, X and Y can be used to quickly refer to the
1784 * active elements of x_coords and y_coords, and Xp and Yp ("X prime" and "Y
1785 * prime") to the inactive elements.
1787 * S can be used to quickly refer to sample_index.
1789 #define X x_coords[xy_coord_index]
1790 #define Y y_coords[xy_coord_index]
1791 #define Xp x_coords[!xy_coord_index]
1792 #define Yp y_coords[!xy_coord_index]
1793 #define S sample_index
1795 /* Quickly swap the roles of (X, Y) and (Xp, Yp). Saves us from having to do
1796 * MOVs to transform (Xp, Yp) to (X, Y) after a coordinate transformation.
1798 #define SWAP_XY_AND_XPYP() xy_coord_index = !xy_coord_index;
/* NOTE(review): the opening line of this function's doc comment (and its
 * closing, plus braces, the 4x/8x case labels, and the else-branch body
 * setting s_is_zero) were dropped by the extraction; code text below is
 * preserved byte-for-byte. The function reads pixel X/Y from the R1
 * payload into X and Y (UW), widens them to UD in Xp/Yp, and under
 * persample dispatch fills S with the per-fragment sample index (4x: the
 * (0,1,2,3) subspan sequence; 8x: that sequence plus the starting sample
 * pair index decoded from R0.0 bits 7:6).
 */
1801 * Emit code to compute the X and Y coordinates of the pixels being rendered
1802 * by this WM invocation.
1804 * Assuming the render target is set up for Y tiling, these (X, Y) values are
1805 * related to the address offset where outputs will be written by the formula:
1807 * (X, Y, S) = decode_msaa(detile(offset)).
1809 * (See brw_blorp_blit_program).
1812 brw_blorp_blit_program::compute_frag_coords()
1814 /* R1.2[15:0] = X coordinate of upper left pixel of subspan 0 (pixel 0)
1815 * R1.3[15:0] = X coordinate of upper left pixel of subspan 1 (pixel 4)
1816 * R1.4[15:0] = X coordinate of upper left pixel of subspan 2 (pixel 8)
1817 * R1.5[15:0] = X coordinate of upper left pixel of subspan 3 (pixel 12)
1819 * Pixels within a subspan are laid out in this arrangement:
1823 * So, to compute the coordinates of each pixel, we need to read every 2nd
1824 * 16-bit value (vstride=2) from R1, starting at the 4th 16-bit value
1825 * (suboffset=4), and duplicate each value 4 times (hstride=0, width=4).
1826 * In other words, the data we want to access is R1.4<2;4,0>UW.
1828 * Then, we need to add the repeating sequence (0, 1, 0, 1, ...) to the
1829 * result, since pixels n+1 and n+3 are in the right half of the subspan.
1831 emit_add(vec16(retype(X
, BRW_REGISTER_TYPE_UW
)),
1832 stride(suboffset(R1
, 4), 2, 4, 0), brw_imm_v(0x10101010));
1834 /* Similarly, Y coordinates for subspans come from R1.2[31:16] through
1835 * R1.5[31:16], so to get pixel Y coordinates we need to start at the 5th
1836 * 16-bit value instead of the 4th (R1.5<2;4,0>UW instead of
1839 * And we need to add the repeating sequence (0, 0, 1, 1, ...), since
1840 * pixels n+2 and n+3 are in the bottom half of the subspan.
1842 emit_add(vec16(retype(Y
, BRW_REGISTER_TYPE_UW
)),
1843 stride(suboffset(R1
, 5), 2, 4, 0), brw_imm_v(0x11001100));
1845 /* Move the coordinates to UD registers. */
1846 emit_mov(vec16(Xp
), retype(X
, BRW_REGISTER_TYPE_UW
));
1847 emit_mov(vec16(Yp
), retype(Y
, BRW_REGISTER_TYPE_UW
));
1850 if (key
->persample_msaa_dispatch
) {
1851 switch (key
->rt_samples
) {
1854 /* The WM will be run in MSDISPMODE_PERSAMPLE with num_samples == 4.
1855 * Therefore, subspan 0 will represent sample 0, subspan 1 will
1856 * represent sample 1, and so on.
1858 * So we need to populate S with the sequence (0, 0, 0, 0, 1, 1, 1,
1859 * 1, 2, 2, 2, 2, 3, 3, 3, 3). The easiest way to do this is to
1860 * populate a temporary variable with the sequence (0, 1, 2, 3), and
1861 * then copy from it using vstride=1, width=4, hstride=0.
1863 struct brw_reg t1_uw1
= retype(t1
, BRW_REGISTER_TYPE_UW
);
1864 emit_mov(vec16(t1_uw1
), key
->rt_samples
== 4 ?
1865 brw_imm_v(0x3210) : brw_imm_v(0x1010));
1866 /* Move to UD sample_index register. */
1867 emit_mov_8(S
, stride(t1_uw1
, 1, 4, 0));
1868 emit_mov_8(offset(S
, 1), suboffset(stride(t1_uw1
, 1, 4, 0), 2));
1872 /* The WM will be run in MSDISPMODE_PERSAMPLE with num_samples == 8.
1873 * Therefore, subspan 0 will represent sample N (where N is 0 or 4),
1874 * subspan 1 will represent sample 1, and so on. We can find the
1875 * value of N by looking at R0.0 bits 7:6 ("Starting Sample Pair
1876 * Index") and multiplying by two (since samples are always delivered
1877 * in pairs). That is, we compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 &
1880 * Then we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1, 2,
1881 * 2, 2, 2, 3, 3, 3, 3), which we compute by populating a temporary
1882 * variable with the sequence (0, 1, 2, 3), and then reading from it
1883 * using vstride=1, width=4, hstride=0.
1885 struct brw_reg t1_ud1
= vec1(retype(t1
, BRW_REGISTER_TYPE_UD
));
1886 struct brw_reg t2_uw1
= retype(t2
, BRW_REGISTER_TYPE_UW
);
1887 struct brw_reg r0_ud1
= vec1(retype(R0
, BRW_REGISTER_TYPE_UD
));
1888 emit_and(t1_ud1
, r0_ud1
, brw_imm_ud(0xc0));
1889 emit_shr(t1_ud1
, t1_ud1
, brw_imm_ud(5));
1890 emit_mov(vec16(t2_uw1
), brw_imm_v(0x3210));
1891 emit_add(vec16(S
), retype(t1_ud1
, BRW_REGISTER_TYPE_UW
),
1892 stride(t2_uw1
, 1, 4, 0));
1893 emit_add_8(offset(S
, 1),
1894 retype(t1_ud1
, BRW_REGISTER_TYPE_UW
),
1895 suboffset(stride(t2_uw1
, 1, 4, 0), 2));
1899 unreachable("Unrecognized sample count in "
1900 "brw_blorp_blit_program::compute_frag_coords()");
1904 /* Either the destination surface is single-sampled, or the WM will be
1905 * run in MSDISPMODE_PERPIXEL (which causes a single fragment dispatch
1906 * per pixel). In either case, it's not meaningful to compute a sample
1907 * value. Just set it to 0.
/* NOTE(review): the doc comment's opening line, the function's braces,
 * the early "return", the if/else around the two directions, and the
 * trailing SWAP_XY_AND_XPYP() were dropped by the extraction; code text
 * below is preserved byte-for-byte. The two emit sequences implement the
 * Y->W and W->Y bit-swizzle formulas derived in the comments (results are
 * built in Xp/Yp from X/Y via the t1/t2 temporaries).
 */
1914 * Emit code to compensate for the difference between Y and W tiling.
1916 * This code modifies the X and Y coordinates according to the formula:
1918 * (X', Y', S') = detile(new_tiling, tile(old_tiling, X, Y, S))
1920 * (See brw_blorp_blit_program).
1922 * It can only translate between W and Y tiling, so new_tiling and old_tiling
1923 * are booleans where true represents W tiling and false represents Y tiling.
1926 brw_blorp_blit_program::translate_tiling(bool old_tiled_w
, bool new_tiled_w
)
1928 if (old_tiled_w
== new_tiled_w
)
1931 /* In the code that follows, we can safely assume that S = 0, because W
1932 * tiling formats always use IMS layout.
1937 /* Given X and Y coordinates that describe an address using Y tiling,
1938 * translate to the X and Y coordinates that describe the same address
1941 * If we break down the low order bits of X and Y, using a
1942 * single letter to represent each low-order bit:
1944 * X = A << 7 | 0bBCDEFGH
1945 * Y = J << 5 | 0bKLMNP (1)
1947 * Then we can apply the Y tiling formula to see the memory offset being
1950 * offset = (J * tile_pitch + A) << 12 | 0bBCDKLMNPEFGH (2)
1952 * If we apply the W detiling formula to this memory location, that the
1953 * corresponding X' and Y' coordinates are:
1955 * X' = A << 6 | 0bBCDPFH (3)
1956 * Y' = J << 6 | 0bKLMNEG
1958 * Combining (1) and (3), we see that to transform (X, Y) to (X', Y'),
1959 * we need to make the following computation:
1961 * X' = (X & ~0b1011) >> 1 | (Y & 0b1) << 2 | X & 0b1 (4)
1962 * Y' = (Y & ~0b1) << 1 | (X & 0b1000) >> 2 | (X & 0b10) >> 1
1964 emit_and(t1
, X
, brw_imm_uw(0xfff4)); /* X & ~0b1011 */
1965 emit_shr(t1
, t1
, brw_imm_uw(1)); /* (X & ~0b1011) >> 1 */
1966 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
1967 emit_shl(t2
, t2
, brw_imm_uw(2)); /* (Y & 0b1) << 2 */
1968 emit_or(t1
, t1
, t2
); /* (X & ~0b1011) >> 1 | (Y & 0b1) << 2 */
1969 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
1970 emit_or(Xp
, t1
, t2
);
1971 emit_and(t1
, Y
, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
1972 emit_shl(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
1973 emit_and(t2
, X
, brw_imm_uw(8)); /* X & 0b1000 */
1974 emit_shr(t2
, t2
, brw_imm_uw(2)); /* (X & 0b1000) >> 2 */
1975 emit_or(t1
, t1
, t2
); /* (Y & ~0b1) << 1 | (X & 0b1000) >> 2 */
1976 emit_and(t2
, X
, brw_imm_uw(2)); /* X & 0b10 */
1977 emit_shr(t2
, t2
, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
1978 emit_or(Yp
, t1
, t2
);
1981 /* Applying the same logic as above, but in reverse, we obtain the
1984 * X' = (X & ~0b101) << 1 | (Y & 0b10) << 2 | (Y & 0b1) << 1 | X & 0b1
1985 * Y' = (Y & ~0b11) >> 1 | (X & 0b100) >> 2
1987 emit_and(t1
, X
, brw_imm_uw(0xfffa)); /* X & ~0b101 */
1988 emit_shl(t1
, t1
, brw_imm_uw(1)); /* (X & ~0b101) << 1 */
1989 emit_and(t2
, Y
, brw_imm_uw(2)); /* Y & 0b10 */
1990 emit_shl(t2
, t2
, brw_imm_uw(2)); /* (Y & 0b10) << 2 */
1991 emit_or(t1
, t1
, t2
); /* (X & ~0b101) << 1 | (Y & 0b10) << 2 */
1992 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
1993 emit_shl(t2
, t2
, brw_imm_uw(1)); /* (Y & 0b1) << 1 */
1994 emit_or(t1
, t1
, t2
); /* (X & ~0b101) << 1 | (Y & 0b10) << 2
1996 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
1997 emit_or(Xp
, t1
, t2
);
1998 emit_and(t1
, Y
, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
1999 emit_shr(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
2000 emit_and(t2
, X
, brw_imm_uw(4)); /* X & 0b100 */
2001 emit_shr(t2
, t2
, brw_imm_uw(2)); /* (X & 0b100) >> 2 */
2002 emit_or(Yp
, t1
, t2
);
2008 * Emit code to compensate for the difference between MSAA and non-MSAA
2011 * This code modifies the X and Y coordinates according to the formula:
2013 * (X', Y', S') = encode_msaa(num_samples, IMS, X, Y, S)
2015 * (See brw_blorp_blit_program).
2018 brw_blorp_blit_program::encode_msaa(unsigned num_samples
,
2019 intel_msaa_layout layout
)
2022 case INTEL_MSAA_LAYOUT_NONE
:
2023 /* No translation necessary, and S should already be zero. */
2026 case INTEL_MSAA_LAYOUT_CMS
:
2027 /* We can't compensate for compressed layout since at this point in the
2028 * program we haven't read from the MCS buffer.
2030 unreachable("Bad layout in encode_msaa");
2031 case INTEL_MSAA_LAYOUT_UMS
:
2032 /* No translation necessary. */
2034 case INTEL_MSAA_LAYOUT_IMS
:
2035 switch (num_samples
) {
2037 /* encode_msaa(2, IMS, X, Y, S) = (X', Y', 0)
2038 * where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
2042 /* encode_msaa(4, IMS, X, Y, S) = (X', Y', 0)
2043 * where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
2044 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
2046 emit_and(t1
, X
, brw_imm_uw(0xfffe)); /* X & ~0b1 */
2048 emit_and(t2
, S
, brw_imm_uw(1)); /* S & 0b1 */
2049 emit_or(t1
, t1
, t2
); /* (X & ~0b1) | (S & 0b1) */
2051 emit_shl(t1
, t1
, brw_imm_uw(1)); /* (X & ~0b1) << 1
2053 if (num_samples
== 2) {
2058 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
2059 emit_or(Xp
, t1
, t2
);
2060 emit_and(t1
, Y
, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
2061 emit_shl(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
2063 emit_and(t2
, S
, brw_imm_uw(2)); /* S & 0b10 */
2064 emit_or(t1
, t1
, t2
); /* (Y & ~0b1) << 1 | (S & 0b10) */
2066 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
2067 emit_or(Yp
, t1
, t2
);
2070 /* encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
2071 * where X' = (X & ~0b1) << 2 | (S & 0b100) | (S & 0b1) << 1
2073 * Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
2075 emit_and(t1
, X
, brw_imm_uw(0xfffe)); /* X & ~0b1 */
2076 emit_shl(t1
, t1
, brw_imm_uw(2)); /* (X & ~0b1) << 2 */
2078 emit_and(t2
, S
, brw_imm_uw(4)); /* S & 0b100 */
2079 emit_or(t1
, t1
, t2
); /* (X & ~0b1) << 2 | (S & 0b100) */
2080 emit_and(t2
, S
, brw_imm_uw(1)); /* S & 0b1 */
2081 emit_shl(t2
, t2
, brw_imm_uw(1)); /* (S & 0b1) << 1 */
2082 emit_or(t1
, t1
, t2
); /* (X & ~0b1) << 2 | (S & 0b100)
2085 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
2086 emit_or(Xp
, t1
, t2
);
2087 emit_and(t1
, Y
, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
2088 emit_shl(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
2090 emit_and(t2
, S
, brw_imm_uw(2)); /* S & 0b10 */
2091 emit_or(t1
, t1
, t2
); /* (Y & ~0b1) << 1 | (S & 0b10) */
2093 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
2094 emit_or(Yp
, t1
, t2
);
2104 * Emit code to compensate for the difference between MSAA and non-MSAA
2107 * This code modifies the X and Y coordinates according to the formula:
2109 * (X', Y', S) = decode_msaa(num_samples, IMS, X, Y, S)
2111 * (See brw_blorp_blit_program).
2114 brw_blorp_blit_program::decode_msaa(unsigned num_samples
,
2115 intel_msaa_layout layout
)
2118 case INTEL_MSAA_LAYOUT_NONE
:
2119 /* No translation necessary, and S should already be zero. */
2122 case INTEL_MSAA_LAYOUT_CMS
:
2123 /* We can't compensate for compressed layout since at this point in the
2124 * program we don't have access to the MCS buffer.
2126 unreachable("Bad layout in encode_msaa");
2127 case INTEL_MSAA_LAYOUT_UMS
:
2128 /* No translation necessary. */
2130 case INTEL_MSAA_LAYOUT_IMS
:
2132 switch (num_samples
) {
2134 /* decode_msaa(2, IMS, X, Y, 0) = (X', Y', S)
2135 * where X' = (X & ~0b11) >> 1 | (X & 0b1)
2136 * S = (X & 0b10) >> 1
2139 /* decode_msaa(4, IMS, X, Y, 0) = (X', Y', S)
2140 * where X' = (X & ~0b11) >> 1 | (X & 0b1)
2141 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
2142 * S = (Y & 0b10) | (X & 0b10) >> 1
2144 emit_and(t1
, X
, brw_imm_uw(0xfffc)); /* X & ~0b11 */
2145 emit_shr(t1
, t1
, brw_imm_uw(1)); /* (X & ~0b11) >> 1 */
2146 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
2147 emit_or(Xp
, t1
, t2
);
2149 if (num_samples
== 2) {
2151 emit_and(t2
, X
, brw_imm_uw(2)); /* X & 0b10 */
2152 emit_shr(S
, t2
, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
2154 emit_and(t1
, Y
, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
2155 emit_shr(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
2156 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
2157 emit_or(Yp
, t1
, t2
);
2158 emit_and(t1
, Y
, brw_imm_uw(2)); /* Y & 0b10 */
2159 emit_and(t2
, X
, brw_imm_uw(2)); /* X & 0b10 */
2160 emit_shr(t2
, t2
, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
2165 /* decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
2166 * where X' = (X & ~0b111) >> 2 | (X & 0b1)
2167 * Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
2168 * S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
2170 emit_and(t1
, X
, brw_imm_uw(0xfff8)); /* X & ~0b111 */
2171 emit_shr(t1
, t1
, brw_imm_uw(2)); /* (X & ~0b111) >> 2 */
2172 emit_and(t2
, X
, brw_imm_uw(1)); /* X & 0b1 */
2173 emit_or(Xp
, t1
, t2
);
2174 emit_and(t1
, Y
, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
2175 emit_shr(t1
, t1
, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
2176 emit_and(t2
, Y
, brw_imm_uw(1)); /* Y & 0b1 */
2177 emit_or(Yp
, t1
, t2
);
2178 emit_and(t1
, X
, brw_imm_uw(4)); /* X & 0b100 */
2179 emit_and(t2
, Y
, brw_imm_uw(2)); /* Y & 0b10 */
2180 emit_or(t1
, t1
, t2
); /* (X & 0b100) | (Y & 0b10) */
2181 emit_and(t2
, X
, brw_imm_uw(2)); /* X & 0b10 */
2182 emit_shr(t2
, t2
, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
2193 * Emit code to translate from destination (X, Y) coordinates to source (X, Y)
2197 brw_blorp_blit_program::translate_dst_to_src()
2199 struct brw_reg X_f
= retype(X
, BRW_REGISTER_TYPE_F
);
2200 struct brw_reg Y_f
= retype(Y
, BRW_REGISTER_TYPE_F
);
2201 struct brw_reg Xp_f
= retype(Xp
, BRW_REGISTER_TYPE_F
);
2202 struct brw_reg Yp_f
= retype(Yp
, BRW_REGISTER_TYPE_F
);
2204 /* Move the UD coordinates to float registers. */
2207 /* Scale and offset */
2208 emit_mad(X_f
, x_transform
.offset
, Xp_f
, x_transform
.multiplier
);
2209 emit_mad(Y_f
, y_transform
.offset
, Yp_f
, y_transform
.multiplier
);
2210 if (key
->blit_scaled
&& key
->blend
) {
2211 /* Translate coordinates to lay out the samples in a rectangular grid
2212 * roughly corresponding to sample locations.
2214 emit_mul(X_f
, X_f
, brw_imm_f(key
->x_scale
));
2215 emit_mul(Y_f
, Y_f
, brw_imm_f(key
->y_scale
));
2216 /* Adjust coordinates so that integers represent pixel centers rather
2219 emit_add(X_f
, X_f
, brw_imm_f(-0.5));
2220 emit_add(Y_f
, Y_f
, brw_imm_f(-0.5));
2222 /* Clamp the X, Y texture coordinates to properly handle the sampling of
2223 * texels on texture edges.
2225 clamp_tex_coords(X_f
, Y_f
,
2226 brw_imm_f(0.0), brw_imm_f(0.0),
2227 rect_grid_x1
, rect_grid_y1
);
2229 /* Store the fractional parts to be used as bilinear interpolation
2232 emit_frc(x_frac
, X_f
);
2233 emit_frc(y_frac
, Y_f
);
2235 /* Round the float coordinates down to nearest integer */
2236 emit_rndd(Xp_f
, X_f
);
2237 emit_rndd(Yp_f
, Y_f
);
2238 emit_mul(X_f
, Xp_f
, brw_imm_f(1.0f
/ key
->x_scale
));
2239 emit_mul(Y_f
, Yp_f
, brw_imm_f(1.0f
/ key
->y_scale
));
2241 } else if (!key
->bilinear_filter
) {
2242 /* Round the float coordinates down to nearest integer by moving to
/* Clamp the texture coordinate pair (regX, regY) in place into the
 * inclusive rectangle [clampX0, clampX1] x [clampY0, clampY1], using
 * max-then-min so values below the lower bound and above the upper
 * bound are both handled.
 *
 * NOTE(review): this chunk is a lossy extraction -- each statement is
 * split across physical lines and the function's braces were dropped.
 * All original tokens below are preserved byte-for-byte.
 */
2252 brw_blorp_blit_program::clamp_tex_coords(struct brw_reg regX
,
2253 struct brw_reg regY
,
2254 struct brw_reg clampX0
,
2255 struct brw_reg clampY0
,
2256 struct brw_reg clampX1
,
2257 struct brw_reg clampY1
)
/* Raise each coordinate to at least its lower bound... */
2259 emit_max(regX
, regX
, clampX0
);
2260 emit_max(regY
, regY
, clampY0
);
/* ...then lower each coordinate to at most its upper bound. */
2261 emit_min(regX
, regX
, clampX1
);
2262 emit_min(regY
, regY
, clampY1
);
2268 brw_blorp_blit_program::manual_blend_average(unsigned num_samples
)
2270 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
2273 assert(key
->texture_data_type
== BRW_REGISTER_TYPE_F
);
2275 /* We add together samples using a binary tree structure, e.g. for 4x MSAA:
2277 * result = ((sample[0] + sample[1]) + (sample[2] + sample[3])) / 4
2279 * This ensures that when all samples have the same value, no numerical
2280 * precision is lost, since each addition operation always adds two equal
2281 * values, and summing two equal floating point values does not lose
2284 * We perform this computation by treating the texture_data array as a
2285 * stack and performing the following operations:
2287 * - push sample 0 onto stack
2288 * - push sample 1 onto stack
2289 * - add top two stack entries
2290 * - push sample 2 onto stack
2291 * - push sample 3 onto stack
2292 * - add top two stack entries
2293 * - add top two stack entries
2294 * - divide top stack entry by 4
2296 * Note that after pushing sample i onto the stack, the number of add
2297 * operations we do is equal to the number of trailing 1 bits in i. This
2298 * works provided the total number of samples is a power of two, which it
2299 * always is for i965.
2301 * For integer formats, we replace the add operations with average
2302 * operations and skip the final division.
2304 unsigned stack_depth
= 0;
2305 for (unsigned i
= 0; i
< num_samples
; ++i
) {
2306 assert(stack_depth
== _mesa_bitcount(i
)); /* Loop invariant */
2308 /* Push sample i onto the stack */
2309 assert(stack_depth
< ARRAY_SIZE(texture_data
));
2314 emit_mov(vec16(S
), brw_imm_ud(i
));
2316 texel_fetch(texture_data
[stack_depth
++]);
2318 if (i
== 0 && key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
) {
2319 /* The Ivy Bridge PRM, Vol4 Part1 p27 (Multisample Control Surface)
2320 * suggests an optimization:
2322 * "A simple optimization with probable large return in
2323 * performance is to compare the MCS value to zero (indicating
2324 * all samples are on sample slice 0), and sample only from
2325 * sample slice 0 using ld2dss if MCS is zero."
2327 * Note that in the case where the MCS value is zero, sampling from
2328 * sample slice 0 using ld2dss and sampling from sample 0 using
2329 * ld2dms are equivalent (since all samples are on sample slice 0).
2330 * Since we have already sampled from sample 0, all we need to do is
2331 * skip the remaining fetches and averaging if MCS is zero.
2333 emit_cmp_if(BRW_CONDITIONAL_NZ
, mcs_data
, brw_imm_ud(0));
2336 /* Do count_trailing_one_bits(i) times */
2337 for (int j
= count_trailing_one_bits(i
); j
-- > 0; ) {
2338 assert(stack_depth
>= 2);
2341 /* TODO: should use a smaller loop bound for non_RGBA formats */
2342 for (int k
= 0; k
< 4; ++k
) {
2343 emit_combine(BRW_OPCODE_ADD
,
2344 offset(texture_data
[stack_depth
- 1], 2*k
),
2345 offset(vec8(texture_data
[stack_depth
- 1]), 2*k
),
2346 offset(vec8(texture_data
[stack_depth
]), 2*k
));
2351 /* We should have just 1 sample on the stack now. */
2352 assert(stack_depth
== 1);
2354 /* Scale the result down by a factor of num_samples */
2355 /* TODO: should use a smaller loop bound for non-RGBA formats */
2356 for (int j
= 0; j
< 4; ++j
) {
2357 emit_mul(offset(texture_data
[0], 2*j
),
2358 offset(vec8(texture_data
[0]), 2*j
),
2359 brw_imm_f(1.0f
/ num_samples
));
2362 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
2367 brw_blorp_blit_program::manual_blend_bilinear(unsigned num_samples
)
2369 /* We do this computation by performing the following operations:
2371 * In case of 4x, 8x MSAA:
2372 * - Compute the pixel coordinates and sample numbers (a, b, c, d)
2373 * which are later used for interpolation
2374 * - linearly interpolate samples a and b in X
2375 * - linearly interpolate samples c and d in X
2376 * - linearly interpolate the results of last two operations in Y
2378 * result = lrp(lrp(a + b) + lrp(c + d))
2380 struct brw_reg Xp_f
= retype(Xp
, BRW_REGISTER_TYPE_F
);
2381 struct brw_reg Yp_f
= retype(Yp
, BRW_REGISTER_TYPE_F
);
2382 struct brw_reg t1_f
= retype(t1
, BRW_REGISTER_TYPE_F
);
2383 struct brw_reg t2_f
= retype(t2
, BRW_REGISTER_TYPE_F
);
2385 for (unsigned i
= 0; i
< 4; ++i
) {
2386 assert(i
< ARRAY_SIZE(texture_data
));
2389 /* Compute pixel coordinates */
2390 emit_add(vec16(x_sample_coords
), Xp_f
,
2391 brw_imm_f((float)(i
& 0x1) * (1.0f
/ key
->x_scale
)));
2392 emit_add(vec16(y_sample_coords
), Yp_f
,
2393 brw_imm_f((float)((i
>> 1) & 0x1) * (1.0f
/ key
->y_scale
)));
2394 emit_mov(vec16(X
), x_sample_coords
);
2395 emit_mov(vec16(Y
), y_sample_coords
);
2397 /* The MCS value we fetch has to match up with the pixel that we're
2398 * sampling from. Since we sample from different pixels in each
2399 * iteration of this "for" loop, the call to mcs_fetch() should be
2400 * here inside the loop after computing the pixel coordinates.
2402 if (key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
)
2405 /* Compute sample index and map the sample index to a sample number.
2406 * Sample index layout shows the numbering of slots in a rectangular
2407 * grid of samples with in a pixel. Sample number layout shows the
2408 * rectangular grid of samples roughly corresponding to the real sample
2409 * locations with in a pixel.
2410 * In case of 4x MSAA, layout of sample indices matches the layout of
2418 * In case of 8x MSAA the two layouts don't match.
2419 * sample index layout : --------- sample number layout : ---------
2420 * | 0 | 1 | | 5 | 2 |
2421 * --------- ---------
2422 * | 2 | 3 | | 4 | 6 |
2423 * --------- ---------
2424 * | 4 | 5 | | 0 | 3 |
2425 * --------- ---------
2426 * | 6 | 7 | | 7 | 1 |
2427 * --------- ---------
2429 * Fortunately, this can be done fairly easily as:
2430 * S' = (0x17306425 >> (S * 4)) & 0xf
2432 emit_frc(vec16(t1_f
), x_sample_coords
);
2433 emit_frc(vec16(t2_f
), y_sample_coords
);
2434 emit_mul(vec16(t1_f
), t1_f
, brw_imm_f(key
->x_scale
));
2435 emit_mul(vec16(t2_f
), t2_f
, brw_imm_f(key
->x_scale
* key
->y_scale
));
2436 emit_add(vec16(t1_f
), t1_f
, t2_f
);
2437 emit_mov(vec16(S
), t1_f
);
2439 if (num_samples
== 8) {
2440 emit_mov(vec16(t2
), brw_imm_d(0x17306425));
2441 emit_shl(vec16(S
), S
, brw_imm_d(2));
2442 emit_shr(vec16(S
), t2
, S
);
2443 emit_and(vec16(S
), S
, brw_imm_d(0xf));
2445 texel_fetch(texture_data
[i
]);
2448 #define SAMPLE(x, y) offset(texture_data[x], y)
2449 for (int index
= 3; index
> 0; ) {
2450 /* Since we're doing SIMD16, 4 color channels fits in to 8 registers.
2451 * Counter value of 8 in 'for' loop below is used to interpolate all
2452 * the color components.
2454 for (int k
= 0; k
< 8; k
+= 2)
2455 emit_lrp(vec8(SAMPLE(index
- 1, k
)),
2457 vec8(SAMPLE(index
, k
)),
2458 vec8(SAMPLE(index
- 1, k
)));
2461 for (int k
= 0; k
< 8; k
+= 2)
2462 emit_lrp(vec8(SAMPLE(0, k
)),
2465 vec8(SAMPLE(0, k
)));
2470 * Emit code to look up a value in the texture using the SAMPLE message (which
2471 * does blending of MSAA surfaces).
/* Emit a texture lookup into `dst` using the SAMPLE message
 * (SHADER_OPCODE_TEX) with floating-point U/V coordinates; per the
 * preceding comment, SAMPLE performs blending of MSAA surfaces.
 *
 * NOTE(review): lossy extraction -- braces and some lines are missing;
 * original tokens preserved byte-for-byte.
 */
2474 brw_blorp_blit_program::sample(struct brw_reg dst
)
/* Sampler-message argument layout: float U, then float V. */
2476 static const sampler_message_arg args
[2] = {
2477 SAMPLER_MESSAGE_ARG_U_FLOAT
,
2478 SAMPLER_MESSAGE_ARG_V_FLOAT
/* Issue the lookup with the argument count taken from the array. */
2481 texture_lookup(dst
, SHADER_OPCODE_TEX
, args
, ARRAY_SIZE(args
));
2485 * Emit code to look up a value in the texture using the SAMPLE_LD message
2486 * (which does a simple texel fetch).
2489 brw_blorp_blit_program::texel_fetch(struct brw_reg dst
)
2491 static const sampler_message_arg gen6_args
[5] = {
2492 SAMPLER_MESSAGE_ARG_U_INT
,
2493 SAMPLER_MESSAGE_ARG_V_INT
,
2494 SAMPLER_MESSAGE_ARG_ZERO_INT
, /* R */
2495 SAMPLER_MESSAGE_ARG_ZERO_INT
, /* LOD */
2496 SAMPLER_MESSAGE_ARG_SI_INT
2498 static const sampler_message_arg gen7_ld_args
[] = {
2499 SAMPLER_MESSAGE_ARG_U_INT
,
2500 SAMPLER_MESSAGE_ARG_ZERO_INT
, /* LOD */
2501 SAMPLER_MESSAGE_ARG_V_INT
,
2502 SAMPLER_MESSAGE_ARG_R_INT
2504 static const sampler_message_arg gen7_ld2dss_args
[3] = {
2505 SAMPLER_MESSAGE_ARG_SI_INT
,
2506 SAMPLER_MESSAGE_ARG_U_INT
,
2507 SAMPLER_MESSAGE_ARG_V_INT
2509 static const sampler_message_arg gen7_ld2dms_args
[4] = {
2510 SAMPLER_MESSAGE_ARG_SI_INT
,
2511 SAMPLER_MESSAGE_ARG_MCS_INT
,
2512 SAMPLER_MESSAGE_ARG_U_INT
,
2513 SAMPLER_MESSAGE_ARG_V_INT
2515 static const sampler_message_arg gen9_ld_args
[] = {
2516 SAMPLER_MESSAGE_ARG_U_INT
,
2517 SAMPLER_MESSAGE_ARG_V_INT
,
2518 SAMPLER_MESSAGE_ARG_ZERO_INT
, /* LOD */
2519 SAMPLER_MESSAGE_ARG_R_INT
2524 texture_lookup(dst
, SHADER_OPCODE_TXF
, gen6_args
, s_is_zero
? 2 : 5);
2529 switch (key
->tex_layout
) {
2530 case INTEL_MSAA_LAYOUT_IMS
:
2531 /* From the Ivy Bridge PRM, Vol4 Part1 p72 (Multisampled Surface Storage
2534 * If this field is MSFMT_DEPTH_STENCIL
2535 * [a.k.a. INTEL_MSAA_LAYOUT_IMS], the only sampling engine
2536 * messages allowed are "ld2dms", "resinfo", and "sampleinfo".
2538 * So fall through to emit the same message as we use for
2539 * INTEL_MSAA_LAYOUT_CMS.
2541 case INTEL_MSAA_LAYOUT_CMS
:
2542 texture_lookup(dst
, SHADER_OPCODE_TXF_CMS
,
2543 gen7_ld2dms_args
, ARRAY_SIZE(gen7_ld2dms_args
));
2545 case INTEL_MSAA_LAYOUT_UMS
:
2546 texture_lookup(dst
, SHADER_OPCODE_TXF_UMS
,
2547 gen7_ld2dss_args
, ARRAY_SIZE(gen7_ld2dss_args
));
2549 case INTEL_MSAA_LAYOUT_NONE
:
2552 texture_lookup(dst
, SHADER_OPCODE_TXF
, gen7_ld_args
,
2553 ARRAY_SIZE(gen7_ld_args
));
2555 texture_lookup(dst
, SHADER_OPCODE_TXF
, gen9_ld_args
,
2556 ARRAY_SIZE(gen9_ld_args
));
2562 unreachable("Should not get here.");
/* Fetch MCS (multisample control surface) data for the current U/V
 * integer coordinates into `mcs_data` using the ld_mcs message
 * (SHADER_OPCODE_TXF_MCS); the result is later passed as the MCS
 * argument of ld2dms lookups (see SAMPLER_MESSAGE_ARG_MCS_INT use
 * in texture_lookup).
 *
 * NOTE(review): lossy extraction -- braces and some lines are missing;
 * original tokens preserved byte-for-byte.
 */
2567 brw_blorp_blit_program::mcs_fetch()
/* Argument layout: integer U, then integer V. */
2569 static const sampler_message_arg gen7_ld_mcs_args
[2] = {
2570 SAMPLER_MESSAGE_ARG_U_INT
,
2571 SAMPLER_MESSAGE_ARG_V_INT
/* vec16: the MCS result is consumed at SIMD16 width. */
2573 texture_lookup(vec16(mcs_data
), SHADER_OPCODE_TXF_MCS
,
2574 gen7_ld_mcs_args
, ARRAY_SIZE(gen7_ld_mcs_args
));
2578 brw_blorp_blit_program::texture_lookup(struct brw_reg dst
,
2580 const sampler_message_arg
*args
,
2583 struct brw_reg mrf
=
2584 retype(vec16(brw_message_reg(base_mrf
)), BRW_REGISTER_TYPE_UD
);
2585 for (int arg
= 0; arg
< num_args
; ++arg
) {
2586 switch (args
[arg
]) {
2587 case SAMPLER_MESSAGE_ARG_U_FLOAT
:
2588 if (key
->bilinear_filter
)
2589 emit_mov(retype(mrf
, BRW_REGISTER_TYPE_F
),
2590 retype(X
, BRW_REGISTER_TYPE_F
));
2592 emit_mov(retype(mrf
, BRW_REGISTER_TYPE_F
), X
);
2594 case SAMPLER_MESSAGE_ARG_V_FLOAT
:
2595 if (key
->bilinear_filter
)
2596 emit_mov(retype(mrf
, BRW_REGISTER_TYPE_F
),
2597 retype(Y
, BRW_REGISTER_TYPE_F
));
2599 emit_mov(retype(mrf
, BRW_REGISTER_TYPE_F
), Y
);
2601 case SAMPLER_MESSAGE_ARG_U_INT
:
2604 case SAMPLER_MESSAGE_ARG_V_INT
:
2607 case SAMPLER_MESSAGE_ARG_R_INT
:
2608 emit_mov(mrf
, src_z
);
2610 case SAMPLER_MESSAGE_ARG_SI_INT
:
2611 /* Note: on Gen7, this code may be reached with s_is_zero==true
2612 * because in Gen7's ld2dss message, the sample index is the first
2613 * argument. When this happens, we need to move a 0 into the
2614 * appropriate message register.
2617 emit_mov(mrf
, brw_imm_ud(0));
2621 case SAMPLER_MESSAGE_ARG_MCS_INT
:
2622 switch (key
->tex_layout
) {
2623 case INTEL_MSAA_LAYOUT_CMS
:
2624 emit_mov(mrf
, mcs_data
);
2626 case INTEL_MSAA_LAYOUT_IMS
:
2627 /* When sampling from an IMS surface, MCS data is not relevant,
2628 * and the hardware ignores it. So don't bother populating it.
2632 /* We shouldn't be trying to send MCS data with any other
2635 assert (!"Unsupported layout for MCS data");
2639 case SAMPLER_MESSAGE_ARG_ZERO_INT
:
2640 emit_mov(mrf
, brw_imm_ud(0));
2646 emit_texture_lookup(retype(dst
, BRW_REGISTER_TYPE_UW
) /* dest */,
2649 mrf
.nr
- base_mrf
/* msg_length */);
2657 #undef SWAP_XY_AND_XPYP
2660 brw_blorp_get_blit_kernel(struct brw_context
*brw
,
2661 struct brw_blorp_params
*params
,
2662 const struct brw_blorp_blit_prog_key
*prog_key
)
2664 if (brw_search_cache(&brw
->cache
, BRW_CACHE_BLORP_PROG
,
2665 prog_key
, sizeof(*prog_key
),
2666 ¶ms
->wm_prog_kernel
, ¶ms
->wm_prog_data
))
2669 const unsigned *program
;
2670 unsigned program_size
;
2671 struct brw_blorp_prog_data prog_data
;
2673 /* Try and compile with NIR first. If that fails, fall back to the old
2674 * method of building shaders manually.
2676 nir_shader
*nir
= brw_blorp_build_nir_shader(brw
, prog_key
, &prog_data
);
2678 struct brw_wm_prog_key wm_key
;
2679 brw_blorp_init_wm_prog_key(&wm_key
);
2680 wm_key
.tex
.compressed_multisample_layout_mask
=
2681 prog_key
->tex_layout
== INTEL_MSAA_LAYOUT_CMS
;
2682 wm_key
.multisample_fbo
= prog_key
->rt_samples
> 1;
2684 program
= brw_blorp_compile_nir_shader(brw
, nir
, &wm_key
, false,
2685 &prog_data
, &program_size
);
2687 brw_blorp_blit_program
prog(brw
, prog_key
);
2688 program
= prog
.compile(brw
, INTEL_DEBUG
& DEBUG_BLORP
, &program_size
);
2689 prog_data
= prog
.prog_data
;
2692 brw_upload_cache(&brw
->cache
, BRW_CACHE_BLORP_PROG
,
2693 prog_key
, sizeof(*prog_key
),
2694 program
, program_size
,
2695 &prog_data
, sizeof(prog_data
),
2696 ¶ms
->wm_prog_kernel
, ¶ms
->wm_prog_data
);
/* Copy the blended/fetched texture data from texture_data[0] into
 * message registers and emit the final render-target write that also
 * terminates the thread.
 *
 * NOTE(review): lossy extraction -- braces and the trailing arguments
 * of emit_render_target_write() were dropped; original tokens are
 * preserved byte-for-byte.
 */
2700 brw_blorp_blit_program::render_target_write()
/* MRF destination typed to match the texture data's register type. */
2702 struct brw_reg mrf_rt_write
=
2703 retype(vec16(brw_message_reg(base_mrf
)), key
->texture_data_type
);
2706 /* If we may have killed pixels, then we need to send R0 and R1 in a header
2707 * so that the render target knows which pixels we killed.
2709 bool use_header
= key
->use_kill
;
2711 /* Copy R0/1 to MRF */
2712 emit_mov(retype(mrf_rt_write
, BRW_REGISTER_TYPE_UD
),
2713 retype(R0
, BRW_REGISTER_TYPE_UD
));
2717 /* Copy texture data to MRFs */
/* Four iterations -- one per color channel (RGBA). */
2718 for (int i
= 0; i
< 4; ++i
) {
2719 /* E.g. mov(16) m2.0<1>:f r2.0<8;8,1>:f { Align1, H1 } */
2720 emit_mov(offset(mrf_rt_write
, mrf_offset
),
2721 offset(vec8(texture_data
[0]), 2*i
));
2725 /* Now write to the render target and terminate the thread */
/* On gen < 8 the message base MRF is passed; -1 otherwise
 * (presumably payload setup differs on gen8+ -- extraction dropped
 * the remaining arguments, so confirm against the full source). */
2726 emit_render_target_write(
2728 brw
->gen
< 8 ? base_mrf
: -1,
2729 mrf_offset
/* msg_length. TODO: Should be smaller for non-RGBA formats. */,
/* Fill in a linear coordinate transform (multiplier, offset) mapping a
 * destination coordinate range [dst0, dst1) onto a source range
 * [src0, src1), including the +0.5 pixel-center correction described
 * in the comments below.
 *
 * NOTE(review): lossy extraction -- the branch lines selecting between
 * the mirrored and non-mirrored cases were dropped, so both
 * assignments appear sequentially here; original tokens preserved
 * byte-for-byte.
 */
2735 brw_blorp_setup_coord_transform(struct brw_blorp_coord_transform
*xform
,
2736 GLfloat src0
, GLfloat src1
,
2737 GLfloat dst0
, GLfloat dst1
,
/* Uniform scale between the two ranges. */
2740 float scale
= (src1
- src0
) / (dst1
- dst0
);
2742 /* When not mirroring a coordinate (say, X), we need:
2743 * src_x - src_x0 = (dst_x - dst_x0 + 0.5) * scale
2745 * src_x = src_x0 + (dst_x - dst_x0 + 0.5) * scale
2747 * blorp program uses "round toward zero" to convert the
2748 * transformed floating point coordinates to integer coordinates,
2749 * whereas the behaviour we actually want is "round to nearest",
2750 * so 0.5 provides the necessary correction.
/* Non-mirrored case. */
2752 xform
->multiplier
= scale
;
2753 xform
->offset
= src0
+ (-dst0
+ 0.5f
) * scale
;
2755 /* When mirroring X we need:
2756 * src_x - src_x0 = dst_x1 - dst_x - 0.5
2758 * src_x = src_x0 + (dst_x1 -dst_x - 0.5) * scale
/* Mirrored case: negate the scale and anchor on dst1. */
2760 xform
->multiplier
= -scale
;
2761 xform
->offset
= src0
+ (dst1
- 0.5f
) * scale
;
/* NOTE(review): lossy extraction -- the tail of this function
 * (the remaining return statements and closing braces) was dropped;
 * original tokens below are preserved byte-for-byte. */
2767 * Determine which MSAA layout the GPU pipeline should be configured for,
2768 * based on the chip generation, the number of samples, and the true layout of
2769 * the image in memory.
2771 inline intel_msaa_layout
2772 compute_msaa_layout_for_pipeline(struct brw_context
*brw
, unsigned num_samples
,
2773 intel_msaa_layout true_layout
)
/* Single-sampled (or zero-sample) configuration. */
2775 if (num_samples
<= 1) {
2776 /* Layout is used to determine if ld2dms is needed for sampling. In
2777 * single sampled case normal ld is enough avoiding also the need to
2778 * fetch mcs. Therefore simply set the layout to none.
/* On gen9+ a CMS surface sampled single-sampled degrades to NONE. */
2780 if (brw
->gen
>= 9 && true_layout
== INTEL_MSAA_LAYOUT_CMS
) {
2781 return INTEL_MSAA_LAYOUT_NONE
;
2784 /* When configuring the GPU for non-MSAA, we can still accommodate IMS
2785 * format buffers, by transforming coordinates appropriately.
2787 assert(true_layout
== INTEL_MSAA_LAYOUT_NONE
||
2788 true_layout
== INTEL_MSAA_LAYOUT_IMS
);
2789 return INTEL_MSAA_LAYOUT_NONE
;
/* Multisampled from here on: a real MSAA layout is required. */
2791 assert(true_layout
!= INTEL_MSAA_LAYOUT_NONE
);
2794 /* Prior to Gen7, all MSAA surfaces use IMS layout. */
2795 if (brw
->gen
== 6) {
2796 assert(true_layout
== INTEL_MSAA_LAYOUT_IMS
);
2804 * Note: if the src (or dst) is a 2D multisample array texture on Gen7+ using
2805 * INTEL_MSAA_LAYOUT_UMS or INTEL_MSAA_LAYOUT_CMS, src_layer (dst_layer) is
2806 * the physical layer holding sample 0. So, for example, if
2807 * src_mt->num_samples == 4, then logical layer n corresponds to src_layer ==
2811 brw_blorp_blit_miptrees(struct brw_context
*brw
,
2812 struct intel_mipmap_tree
*src_mt
,
2813 unsigned src_level
, unsigned src_layer
,
2814 mesa_format src_format
, int src_swizzle
,
2815 struct intel_mipmap_tree
*dst_mt
,
2816 unsigned dst_level
, unsigned dst_layer
,
2817 mesa_format dst_format
,
2818 float src_x0
, float src_y0
,
2819 float src_x1
, float src_y1
,
2820 float dst_x0
, float dst_y0
,
2821 float dst_x1
, float dst_y1
,
2822 GLenum filter
, bool mirror_x
, bool mirror_y
,
2823 bool decode_srgb
, bool encode_srgb
)
2825 /* Get ready to blit. This includes depth resolving the src and dst
2826 * buffers if necessary. Note: it's not necessary to do a color resolve on
2827 * the destination buffer because we use the standard render path to render
2828 * to destination color buffers, and the standard render path is
2831 intel_miptree_resolve_color(brw
, src_mt
, INTEL_MIPTREE_IGNORE_CCS_E
);
2832 intel_miptree_slice_resolve_depth(brw
, src_mt
, src_level
, src_layer
);
2833 intel_miptree_slice_resolve_depth(brw
, dst_mt
, dst_level
, dst_layer
);
2835 intel_miptree_prepare_mcs(brw
, dst_mt
);
2837 DBG("%s from %dx %s mt %p %d %d (%f,%f) (%f,%f)"
2838 "to %dx %s mt %p %d %d (%f,%f) (%f,%f) (flip %d,%d)\n",
2840 src_mt
->num_samples
, _mesa_get_format_name(src_mt
->format
), src_mt
,
2841 src_level
, src_layer
, src_x0
, src_y0
, src_x1
, src_y1
,
2842 dst_mt
->num_samples
, _mesa_get_format_name(dst_mt
->format
), dst_mt
,
2843 dst_level
, dst_layer
, dst_x0
, dst_y0
, dst_x1
, dst_y1
,
2844 mirror_x
, mirror_y
);
2846 if (!decode_srgb
&& _mesa_get_format_color_encoding(src_format
) == GL_SRGB
)
2847 src_format
= _mesa_get_srgb_format_linear(src_format
);
2849 if (!encode_srgb
&& _mesa_get_format_color_encoding(dst_format
) == GL_SRGB
)
2850 dst_format
= _mesa_get_srgb_format_linear(dst_format
);
2852 struct brw_blorp_params params
;
2853 brw_blorp_params_init(¶ms
);
2855 brw_blorp_surface_info_init(brw
, ¶ms
.src
, src_mt
, src_level
,
2856 src_layer
, src_format
, false);
2857 brw_blorp_surface_info_init(brw
, ¶ms
.dst
, dst_mt
, dst_level
,
2858 dst_layer
, dst_format
, true);
2860 /* Even though we do multisample resolves at the time of the blit, OpenGL
2861 * specification defines them as if they happen at the time of rendering,
2862 * which means that the type of averaging we do during the resolve should
2863 * only depend on the source format; the destination format should be
2864 * ignored. But, specification doesn't seem to be strict about it.
2866 * It has been observed that mulitisample resolves produce slightly better
2867 * looking images when averaging is done using destination format. NVIDIA's
2868 * proprietary OpenGL driver also follow this approach. So, we choose to
2869 * follow it in our driver.
2871 * When multisampling, if the source and destination formats are equal
2872 * (aside from the color space), we choose to blit in sRGB space to get
2873 * this higher quality image.
2875 if (params
.src
.num_samples
> 1 &&
2876 _mesa_get_format_color_encoding(dst_mt
->format
) == GL_SRGB
&&
2877 _mesa_get_srgb_format_linear(src_mt
->format
) ==
2878 _mesa_get_srgb_format_linear(dst_mt
->format
)) {
2879 assert(brw
->format_supported_as_render_target
[dst_mt
->format
]);
2880 params
.dst
.brw_surfaceformat
= brw
->render_target_format
[dst_mt
->format
];
2881 params
.src
.brw_surfaceformat
= brw_format_for_mesa_format(dst_mt
->format
);
2884 /* When doing a multisample resolve of a GL_LUMINANCE32F or GL_INTENSITY32F
2885 * texture, the above code configures the source format for L32_FLOAT or
2886 * I32_FLOAT, and the destination format for R32_FLOAT. On Sandy Bridge,
2887 * the SAMPLE message appears to handle multisampled L32_FLOAT and
2888 * I32_FLOAT textures incorrectly, resulting in blocky artifacts. So work
2889 * around the problem by using a source format of R32_FLOAT. This
2890 * shouldn't affect rendering correctness, since the destination format is
2891 * R32_FLOAT, so only the contents of the red channel matters.
2893 if (brw
->gen
== 6 &&
2894 params
.src
.num_samples
> 1 && params
.dst
.num_samples
<= 1 &&
2895 src_mt
->format
== dst_mt
->format
&&
2896 params
.dst
.brw_surfaceformat
== BRW_SURFACEFORMAT_R32_FLOAT
) {
2897 params
.src
.brw_surfaceformat
= params
.dst
.brw_surfaceformat
;
2900 struct brw_blorp_blit_prog_key wm_prog_key
;
2901 memset(&wm_prog_key
, 0, sizeof(wm_prog_key
));
2903 /* texture_data_type indicates the register type that should be used to
2904 * manipulate texture data.
2906 switch (_mesa_get_format_datatype(src_mt
->format
)) {
2907 case GL_UNSIGNED_NORMALIZED
:
2908 case GL_SIGNED_NORMALIZED
:
2910 wm_prog_key
.texture_data_type
= BRW_REGISTER_TYPE_F
;
2912 case GL_UNSIGNED_INT
:
2913 if (src_mt
->format
== MESA_FORMAT_S_UINT8
) {
2914 /* We process stencil as though it's an unsigned normalized color */
2915 wm_prog_key
.texture_data_type
= BRW_REGISTER_TYPE_F
;
2917 wm_prog_key
.texture_data_type
= BRW_REGISTER_TYPE_UD
;
2921 wm_prog_key
.texture_data_type
= BRW_REGISTER_TYPE_D
;
2924 unreachable("Unrecognized blorp format");
2928 /* Gen7's rendering hardware only supports the IMS layout for depth and
2929 * stencil render targets. Blorp always maps its destination surface as
2930 * a color render target (even if it's actually a depth or stencil
2931 * buffer). So if the destination is IMS, we'll have to map it as a
2932 * single-sampled texture and interleave the samples ourselves.
2934 if (dst_mt
->msaa_layout
== INTEL_MSAA_LAYOUT_IMS
)
2935 params
.dst
.num_samples
= 0;
2938 if (params
.dst
.map_stencil_as_y_tiled
&& params
.dst
.num_samples
> 1) {
2939 /* If the destination surface is a W-tiled multisampled stencil buffer
2940 * that we're mapping as Y tiled, then we need to arrange for the WM
2941 * program to run once per sample rather than once per pixel, because
2942 * the memory layout of related samples doesn't match between W and Y
2945 wm_prog_key
.persample_msaa_dispatch
= true;
2948 if (params
.src
.num_samples
> 0 && params
.dst
.num_samples
> 1) {
2949 /* We are blitting from a multisample buffer to a multisample buffer, so
2950 * we must preserve samples within a pixel. This means we have to
2951 * arrange for the WM program to run once per sample rather than once
2954 wm_prog_key
.persample_msaa_dispatch
= true;
2957 /* Scaled blitting or not. */
2958 wm_prog_key
.blit_scaled
=
2959 ((dst_x1
- dst_x0
) == (src_x1
- src_x0
) &&
2960 (dst_y1
- dst_y0
) == (src_y1
- src_y0
)) ? false : true;
2962 /* Scaling factors used for bilinear filtering in multisample scaled
2965 wm_prog_key
.x_scale
= 2.0f
;
2966 wm_prog_key
.y_scale
= src_mt
->num_samples
/ 2.0f
;
2968 if (filter
== GL_LINEAR
&&
2969 params
.src
.num_samples
<= 1 && params
.dst
.num_samples
<= 1)
2970 wm_prog_key
.bilinear_filter
= true;
2972 GLenum base_format
= _mesa_get_format_base_format(src_mt
->format
);
2973 if (base_format
!= GL_DEPTH_COMPONENT
&& /* TODO: what about depth/stencil? */
2974 base_format
!= GL_STENCIL_INDEX
&&
2975 !_mesa_is_format_integer(src_mt
->format
) &&
2976 src_mt
->num_samples
> 1 && dst_mt
->num_samples
<= 1) {
2977 /* We are downsampling a non-integer color buffer, so blend.
2979 * Regarding integer color buffers, the OpenGL ES 3.2 spec says:
2981 * "If the source formats are integer types or stencil values, a
2982 * single sample's value is selected for each pixel."
2984 * This implies we should not blend in that case.
2986 wm_prog_key
.blend
= true;
2989 /* src_samples and dst_samples are the true sample counts */
2990 wm_prog_key
.src_samples
= src_mt
->num_samples
;
2991 wm_prog_key
.dst_samples
= dst_mt
->num_samples
;
2993 /* tex_samples and rt_samples are the sample counts that are set up in
2996 wm_prog_key
.tex_samples
= params
.src
.num_samples
;
2997 wm_prog_key
.rt_samples
= params
.dst
.num_samples
;
2999 /* tex_layout and rt_layout indicate the MSAA layout the GPU pipeline will
3000 * use to access the source and destination surfaces.
3002 wm_prog_key
.tex_layout
=
3003 compute_msaa_layout_for_pipeline(brw
, params
.src
.num_samples
,
3004 params
.src
.msaa_layout
);
3005 wm_prog_key
.rt_layout
=
3006 compute_msaa_layout_for_pipeline(brw
, params
.dst
.num_samples
,
3007 params
.dst
.msaa_layout
);
3009 /* src_layout and dst_layout indicate the true MSAA layout used by src and
3012 wm_prog_key
.src_layout
= src_mt
->msaa_layout
;
3013 wm_prog_key
.dst_layout
= dst_mt
->msaa_layout
;
3015 /* On gen9+ compressed single sampled buffers carry the same layout type as
3016 * multisampled. The difference is that they can be sampled using normal
3017 * ld message and as render target behave just like non-compressed surface
3018 * from compiler point of view. Therefore override the type in the program
3021 if (brw
->gen
>= 9 && params
.src
.num_samples
<= 1 &&
3022 src_mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
)
3023 wm_prog_key
.src_layout
= INTEL_MSAA_LAYOUT_NONE
;
3024 if (brw
->gen
>= 9 && params
.dst
.num_samples
<= 1 &&
3025 dst_mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
)
3026 wm_prog_key
.dst_layout
= INTEL_MSAA_LAYOUT_NONE
;
3028 wm_prog_key
.src_tiled_w
= params
.src
.map_stencil_as_y_tiled
;
3029 wm_prog_key
.dst_tiled_w
= params
.dst
.map_stencil_as_y_tiled
;
3030 /* Round floating point values to nearest integer to avoid "off by one texel"
3031 * kind of errors when blitting.
3033 params
.x0
= params
.wm_push_consts
.dst_x0
= roundf(dst_x0
);
3034 params
.y0
= params
.wm_push_consts
.dst_y0
= roundf(dst_y0
);
3035 params
.x1
= params
.wm_push_consts
.dst_x1
= roundf(dst_x1
);
3036 params
.y1
= params
.wm_push_consts
.dst_y1
= roundf(dst_y1
);
3037 params
.wm_push_consts
.rect_grid_x1
=
3038 minify(src_mt
->logical_width0
, src_level
) * wm_prog_key
.x_scale
- 1.0f
;
3039 params
.wm_push_consts
.rect_grid_y1
=
3040 minify(src_mt
->logical_height0
, src_level
) * wm_prog_key
.y_scale
- 1.0f
;
3042 brw_blorp_setup_coord_transform(¶ms
.wm_push_consts
.x_transform
,
3043 src_x0
, src_x1
, dst_x0
, dst_x1
, mirror_x
);
3044 brw_blorp_setup_coord_transform(¶ms
.wm_push_consts
.y_transform
,
3045 src_y0
, src_y1
, dst_y0
, dst_y1
, mirror_y
);
3047 params
.wm_push_consts
.src_z
=
3048 params
.src
.mt
->target
== GL_TEXTURE_3D
? params
.src
.layer
: 0;
3050 if (params
.dst
.num_samples
<= 1 && dst_mt
->num_samples
> 1) {
3051 /* We must expand the rectangle we send through the rendering pipeline,
3052 * to account for the fact that we are mapping the destination region as
3053 * single-sampled when it is in fact multisampled. We must also align
3054 * it to a multiple of the multisampling pattern, because the
3055 * differences between multisampled and single-sampled surface formats
3056 * will mean that pixels are scrambled within the multisampling pattern.
3057 * TODO: what if this makes the coordinates too large?
3059 * Note: this only works if the destination surface uses the IMS layout.
3060 * If it's UMS, then we have no choice but to set up the rendering
3061 * pipeline as multisampled.
3063 assert(dst_mt
->msaa_layout
== INTEL_MSAA_LAYOUT_IMS
);
3064 switch (dst_mt
->num_samples
) {
3066 params
.x0
= ROUND_DOWN_TO(params
.x0
* 2, 4);
3067 params
.y0
= ROUND_DOWN_TO(params
.y0
, 4);
3068 params
.x1
= ALIGN(params
.x1
* 2, 4);
3069 params
.y1
= ALIGN(params
.y1
, 4);
3072 params
.x0
= ROUND_DOWN_TO(params
.x0
* 2, 4);
3073 params
.y0
= ROUND_DOWN_TO(params
.y0
* 2, 4);
3074 params
.x1
= ALIGN(params
.x1
* 2, 4);
3075 params
.y1
= ALIGN(params
.y1
* 2, 4);
3078 params
.x0
= ROUND_DOWN_TO(params
.x0
* 4, 8);
3079 params
.y0
= ROUND_DOWN_TO(params
.y0
* 2, 4);
3080 params
.x1
= ALIGN(params
.x1
* 4, 8);
3081 params
.y1
= ALIGN(params
.y1
* 2, 4);
3084 unreachable("Unrecognized sample count in brw_blorp_blit_params ctor");
3086 wm_prog_key
.use_kill
= true;
3089 if (params
.dst
.map_stencil_as_y_tiled
) {
3090 /* We must modify the rectangle we send through the rendering pipeline
3091 * (and the size and x/y offset of the destination surface), to account
3092 * for the fact that we are mapping it as Y-tiled when it is in fact
3095 * Both Y tiling and W tiling can be understood as organizations of
3096 * 32-byte sub-tiles; within each 32-byte sub-tile, the layout of pixels
3097 * is different, but the layout of the 32-byte sub-tiles within the 4k
3098 * tile is the same (8 sub-tiles across by 16 sub-tiles down, in
3099 * column-major order). In Y tiling, the sub-tiles are 16 bytes wide
3100 * and 2 rows high; in W tiling, they are 8 bytes wide and 4 rows high.
3102 * Therefore, to account for the layout differences within the 32-byte
3103 * sub-tiles, we must expand the rectangle so the X coordinates of its
3104 * edges are multiples of 8 (the W sub-tile width), and its Y
3105 * coordinates of its edges are multiples of 4 (the W sub-tile height).
3106 * Then we need to scale the X and Y coordinates of the rectangle to
3107 * account for the differences in aspect ratio between the Y and W
3108 * sub-tiles. We need to modify the layer width and height similarly.
3110 * A correction needs to be applied when MSAA is in use: since
3111 * INTEL_MSAA_LAYOUT_IMS uses an interleaving pattern whose height is 4,
3112 * we need to align the Y coordinates to multiples of 8, so that when
3113 * they are divided by two they are still multiples of 4.
3115 * Note: Since the x/y offset of the surface will be applied using the
3116 * SURFACE_STATE command packet, it will be invisible to the swizzling
3117 * code in the shader; therefore it needs to be in a multiple of the
3118 * 32-byte sub-tile size. Fortunately it is, since the sub-tile is 8
3119 * pixels wide and 4 pixels high (when viewed as a W-tiled stencil
3120 * buffer), and the miplevel alignment used for stencil buffers is 8
3121 * pixels horizontally and either 4 or 8 pixels vertically (see
3122 * intel_horizontal_texture_alignment_unit() and
3123 * intel_vertical_texture_alignment_unit()).
3125 * Note: Also, since the SURFACE_STATE command packet can only apply
3126 * offsets that are multiples of 4 pixels horizontally and 2 pixels
3127 * vertically, it is important that the offsets will be multiples of
3128 * these sizes after they are converted into Y-tiled coordinates.
3129 * Fortunately they will be, since we know from above that the offsets
3130 * are a multiple of the 32-byte sub-tile size, and in Y-tiled
3131 * coordinates the sub-tile is 16 pixels wide and 2 pixels high.
3133 * TODO: what if this makes the coordinates (or the texture size) too
3136 const unsigned x_align
= 8, y_align
= params
.dst
.num_samples
!= 0 ? 8 : 4;
3137 params
.x0
= ROUND_DOWN_TO(params
.x0
, x_align
) * 2;
3138 params
.y0
= ROUND_DOWN_TO(params
.y0
, y_align
) / 2;
3139 params
.x1
= ALIGN(params
.x1
, x_align
) * 2;
3140 params
.y1
= ALIGN(params
.y1
, y_align
) / 2;
3141 params
.dst
.width
= ALIGN(params
.dst
.width
, x_align
) * 2;
3142 params
.dst
.height
= ALIGN(params
.dst
.height
, y_align
) / 2;
3143 params
.dst
.x_offset
*= 2;
3144 params
.dst
.y_offset
/= 2;
3145 wm_prog_key
.use_kill
= true;
3148 if (params
.src
.map_stencil_as_y_tiled
) {
3149 /* We must modify the size and x/y offset of the source surface to
3150 * account for the fact that we are mapping it as Y-tiled when it is in
3153 * See the comments above concerning x/y offset alignment for the
3154 * destination surface.
3156 * TODO: what if this makes the texture size too large?
3158 const unsigned x_align
= 8, y_align
= params
.src
.num_samples
!= 0 ? 8 : 4;
3159 params
.src
.width
= ALIGN(params
.src
.width
, x_align
) * 2;
3160 params
.src
.height
= ALIGN(params
.src
.height
, y_align
) / 2;
3161 params
.src
.x_offset
*= 2;
3162 params
.src
.y_offset
/= 2;
3165 brw_blorp_get_blit_kernel(brw
, ¶ms
, &wm_prog_key
);
3167 params
.src
.swizzle
= src_swizzle
;
3169 brw_blorp_exec(brw
, ¶ms
);
3171 intel_miptree_slice_set_needs_hiz_resolve(dst_mt
, dst_level
, dst_layer
);
3173 if (intel_miptree_is_lossless_compressed(brw
, dst_mt
))
3174 dst_mt
->fast_clear_state
= INTEL_FAST_CLEAR_STATE_UNRESOLVED
;