float src_x1, float src_y1,
float dst_x0, float dst_y0,
float dst_x1, float dst_y1,
- bool mirror_x, bool mirror_y)
+ GLenum filter, bool mirror_x, bool mirror_y)
{
/* Get ready to blit. This includes depth resolving the src and dst
* buffers if necessary. Note: it's not necessary to do a color resolve on
src_x1, src_y1,
dst_x0, dst_y0,
dst_x1, dst_y1,
- mirror_x, mirror_y);
+ filter, mirror_x, mirror_y);
brw_blorp_exec(brw, &params);
intel_miptree_slice_set_needs_hiz_resolve(dst_mt, dst_level, dst_layer);
struct intel_renderbuffer *dst_irb,
GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
- bool mirror_x, bool mirror_y)
+ GLenum filter, bool mirror_x, bool mirror_y)
{
/* Find source/dst miptrees */
struct intel_mipmap_tree *src_mt = find_miptree(buffer_bit, src_irb);
dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
- mirror_x, mirror_y);
+ filter, mirror_x, mirror_y);
intel_renderbuffer_set_needs_downsample(dst_irb);
}
fixup_mirroring(mirror_y, srcY0, srcY1);
fixup_mirroring(mirror_y, dstY0, dstY1);
- /* Linear filtering is not yet implemented in blorp engine. So, fallback
- * to other blit paths.
- */
- if ((srcX1 - srcX0 != dstX1 - dstX0 ||
- srcY1 - srcY0 != dstY1 - dstY0) &&
- filter == GL_LINEAR)
- return false;
-
/* If the destination rectangle needs to be clipped or scissored, do so.
*/
if (!(clip_or_scissor(mirror_x, srcX0, srcX1, dstX0, dstX1,
if (dst_irb)
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
- mirror_x, mirror_y);
+ filter, mirror_x, mirror_y);
}
break;
case GL_DEPTH_BUFFER_BIT:
return false;
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
- mirror_x, mirror_y);
+ filter, mirror_x, mirror_y);
break;
case GL_STENCIL_BUFFER_BIT:
src_irb =
return false;
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
- mirror_x, mirror_y);
+ filter, mirror_x, mirror_y);
break;
default:
assert(false);
dst_mt, dst_image->Level, dst_image->Face + slice,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
- false, mirror_y);
+ GL_NEAREST, false, mirror_y);
/* If we're copying to a packed depth stencil texture and the source
* framebuffer has separate stencil, we need to also copy the stencil data
dst_image->Face + slice,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
- false, mirror_y);
+ GL_NEAREST, false, mirror_y);
}
}
* the same as the configuration of the texture, then we need to adjust
* the coordinates to compensate for the difference.
*/
- if (tex_tiled_w != key->src_tiled_w ||
- key->tex_samples != key->src_samples ||
- key->tex_layout != key->src_layout) {
+ if ((tex_tiled_w != key->src_tiled_w ||
+ key->tex_samples != key->src_samples ||
+ key->tex_layout != key->src_layout) &&
+ !key->bilinear_filter) {
encode_msaa(key->src_samples, key->src_layout);
/* Now (X, Y, S) = detile(src_tiling, offset) */
translate_tiling(key->src_tiled_w, tex_tiled_w);
decode_msaa(key->tex_samples, key->tex_layout);
}
- /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)).
- *
- * In other words: X, Y, and S now contain values which, when passed to
- * the texturing unit, will cause data to be read from the correct
- * memory location. So we can fetch the texel now.
- */
- if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
- mcs_fetch();
- texel_fetch(texture_data[0]);
+ if (key->bilinear_filter) {
+ sample(texture_data[0]);
+ }
+ else {
+ /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)).
+ *
+ * In other words: X, Y, and S now contain values which, when passed to
+ * the texturing unit, will cause data to be read from the correct
+ * memory location. So we can fetch the texel now.
+ */
+ if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
+ mcs_fetch();
+ texel_fetch(texture_data[0]);
+ }
}
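For context (not part of the patch): in the bilinear path the generated shader hands float coordinates to an ordinary sample message and lets the sampler hardware blend neighbouring texels, whereas texel_fetch() reads exactly one texel at integer coordinates. A minimal sketch of the blend the hardware performs, where t00..t11 and fx/fy are illustrative names for the four surrounding texels and the fractional coordinate offsets:

/* Illustration only, not part of the patch: the bilinear blend a sample
 * message performs in hardware, which is why the manual tiling/MSAA
 * coordinate translation can be skipped in that path.
 */
static float bilerp(float t00, float t10, float t01, float t11,
                    float fx, float fy)
{
   float top    = t00 + fx * (t10 - t00);   /* blend along x, top row    */
   float bottom = t01 + fx * (t11 - t01);   /* blend along x, bottom row */
   return top + fy * (bottom - top);        /* then blend along y        */
}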
/* Finally, write the fetched (or blended) value to the render target and
brw_RNDD(&func, Yp_f, Y_f);
brw_MUL(&func, X_f, Xp_f, brw_imm_f(1 / key->x_scale));
brw_MUL(&func, Y_f, Yp_f, brw_imm_f(1 / key->y_scale));
- } else {
+ SWAP_XY_AND_XPYP();
+ } else if (!key->bilinear_filter) {
/* Round the float coordinates down to nearest integer by moving to
* UD registers.
*/
brw_MOV(&func, Xp, X_f);
brw_MOV(&func, Yp, Y_f);
+ SWAP_XY_AND_XPYP();
}
- SWAP_XY_AND_XPYP();
brw_set_compression_control(&func, BRW_COMPRESSION_NONE);
}
for (int arg = 0; arg < num_args; ++arg) {
switch (args[arg]) {
case SAMPLER_MESSAGE_ARG_U_FLOAT:
- brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F), X);
+ if (key->bilinear_filter)
+ brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F),
+ retype(X, BRW_REGISTER_TYPE_F));
+ else
+ brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F), X);
break;
case SAMPLER_MESSAGE_ARG_V_FLOAT:
- brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F), Y);
+ if (key->bilinear_filter)
+ brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F),
+ retype(Y, BRW_REGISTER_TYPE_F));
+ else
+ brw_MOV(&func, retype(mrf, BRW_REGISTER_TYPE_F), Y);
break;
case SAMPLER_MESSAGE_ARG_U_INT:
brw_MOV(&func, mrf, X);
GLfloat src_x1, GLfloat src_y1,
GLfloat dst_x0, GLfloat dst_y0,
GLfloat dst_x1, GLfloat dst_y1,
+ GLenum filter,
bool mirror_x, bool mirror_y)
{
struct gl_context *ctx = &brw->ctx;
src.set(brw, src_mt, src_level, src_layer);
dst.set(brw, dst_mt, dst_level, dst_layer);
- src.brw_surfaceformat = dst.brw_surfaceformat;
+ /* Even though we do multisample resolves at the time of the blit, the
+ * OpenGL specification defines them as if they happen at the time of
+ * rendering, which means that the type of averaging we do during the
+ * resolve should only depend on the source format; the destination
+ * format should be ignored. The specification doesn't seem to be strict
+ * about it, though.
+ *
+ * It has been observed that multisample resolves produce slightly better
+ * looking images when averaging is done using the destination format.
+ * NVIDIA's proprietary OpenGL driver also follows this approach. So, we
+ * choose to follow it in our driver.
+ *
+ * The following if-else block takes care of this exception made for
+ * multisampled resolves.
+ */
+ if (src.num_samples > 1)
+ src.brw_surfaceformat = dst.brw_surfaceformat;
+ else
+ dst.brw_surfaceformat = src.brw_surfaceformat;
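As a side note (a sketch, not part of the patch): the rule the hunk above encodes is that a true multisample resolve averages in the destination's format, while any other blit stays in the source's format. Expressed as a standalone helper with hypothetical names:

#include <stdbool.h>

/* Illustration only: surface formats here are opaque hardware enums. */
static void pick_blit_formats(bool msaa_resolve,
                              unsigned src_format, unsigned dst_format,
                              unsigned *src_out, unsigned *dst_out)
{
   if (msaa_resolve) {
      /* Resolve: average the samples in the destination format. */
      *src_out = dst_format;
      *dst_out = dst_format;
   } else {
      /* Ordinary blit: keep both sides in the source format. */
      *src_out = src_format;
      *dst_out = src_format;
   }
}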
use_wm_prog = true;
memset(&wm_prog_key, 0, sizeof(wm_prog_key));
wm_prog_key.x_scale = 2.0;
wm_prog_key.y_scale = src_mt->num_samples / 2.0;
+ if (filter == GL_LINEAR)
+ wm_prog_key.bilinear_filter = true;
+
/* The render path must be configured to use the same number of samples as
* the destination buffer.
*/
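To put numbers on the scale factors (a sketch inferring, from the assignments above, that the per-pixel samples are treated as a grid 2 samples wide and num_samples / 2 samples tall; the function name is hypothetical):

#include <assert.h>

static void example_blit_scales(unsigned num_samples,
                                float *x_scale, float *y_scale)
{
   assert(num_samples >= 2);
   *x_scale = 2.0f;                 /* sample grid width  */
   *y_scale = num_samples / 2.0f;   /* sample grid height */
   /* 4x MSAA gives a 2x2 grid; 8x MSAA gives a 2x4 grid. */
}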