}
+/* Check whether a color blit between two renderbuffer formats is
+ * allowed.  The sRGB and linear variants of a format are treated as
+ * matching (both are mapped through _mesa_get_srgb_format_linear()
+ * before comparison); the comment below documents the one extra
+ * cross-format pair (XRGB <-> ARGB) that is also permitted.
+ */
static bool
-color_formats_match(gl_format src_format, gl_format dst_format)
+color_formats_match(mesa_format src_format, mesa_format dst_format)
{
- gl_format linear_src_format = _mesa_get_srgb_format_linear(src_format);
- gl_format linear_dst_format = _mesa_get_srgb_format_linear(dst_format);
+ mesa_format linear_src_format = _mesa_get_srgb_format_linear(src_format);
+ mesa_format linear_dst_format = _mesa_get_srgb_format_linear(dst_format);
/* Normally, we require the formats to be equal. However, we also support
* blitting from ARGB to XRGB (discarding alpha), and from XRGB to ARGB
* (overriding alpha to 1.0 via blending).
*/
return linear_src_format == linear_dst_format ||
- (linear_src_format == MESA_FORMAT_XRGB8888 &&
- linear_dst_format == MESA_FORMAT_ARGB8888) ||
- (linear_src_format == MESA_FORMAT_ARGB8888 &&
- linear_dst_format == MESA_FORMAT_XRGB8888);
+ (linear_src_format == MESA_FORMAT_B8G8R8X8_UNORM &&
+ linear_dst_format == MESA_FORMAT_B8G8R8A8_UNORM) ||
+ (linear_src_format == MESA_FORMAT_B8G8R8A8_UNORM &&
+ linear_dst_format == MESA_FORMAT_B8G8R8X8_UNORM);
}
static bool
* example MESA_FORMAT_X8_Z24 and MESA_FORMAT_S8_Z24), and we can blit
* between those formats.
*/
- gl_format src_format = find_miptree(buffer_bit, src_irb)->format;
- gl_format dst_format = find_miptree(buffer_bit, dst_irb)->format;
+ mesa_format src_format = find_miptree(buffer_bit, src_irb)->format;
+ mesa_format dst_format = find_miptree(buffer_bit, dst_irb)->format;
return color_formats_match(src_format, dst_format);
}
const sampler_message_arg *args, int num_args);
void render_target_write();
- void emit_lrp(const struct brw_reg &dst,
- const struct brw_reg &src1,
- const struct brw_reg &src2,
- const struct brw_reg &src3);
-
/**
* Base-2 logarithm of the maximum number of samples that can be blended.
*/
* Then, we need to add the repeating sequence (0, 1, 0, 1, ...) to the
* result, since pixels n+1 and n+3 are in the right half of the subspan.
*/
- brw_ADD(&func, vec16(retype(X, BRW_REGISTER_TYPE_UW)),
+ emit_add(vec16(retype(X, BRW_REGISTER_TYPE_UW)),
stride(suboffset(R1, 4), 2, 4, 0), brw_imm_v(0x10101010));
/* Similarly, Y coordinates for subspans come from R1.2[31:16] through
* And we need to add the repeating sequence (0, 0, 1, 1, ...), since
* pixels n+2 and n+3 are in the bottom half of the subspan.
*/
- brw_ADD(&func, vec16(retype(Y, BRW_REGISTER_TYPE_UW)),
+ emit_add(vec16(retype(Y, BRW_REGISTER_TYPE_UW)),
stride(suboffset(R1, 5), 2, 4, 0), brw_imm_v(0x11001100));
/* Move the coordinates to UD registers. */
struct brw_reg t1_ud1 = vec1(retype(t1, BRW_REGISTER_TYPE_UD));
struct brw_reg t2_uw1 = retype(t2, BRW_REGISTER_TYPE_UW);
struct brw_reg r0_ud1 = vec1(retype(R0, BRW_REGISTER_TYPE_UD));
- brw_AND(&func, t1_ud1, r0_ud1, brw_imm_ud(0xc0));
- brw_SHR(&func, t1_ud1, t1_ud1, brw_imm_ud(5));
+ emit_and(t1_ud1, r0_ud1, brw_imm_ud(0xc0));
+ emit_shr(t1_ud1, t1_ud1, brw_imm_ud(5));
emit_mov(vec16(t2_uw1), brw_imm_v(0x3210));
- brw_ADD(&func, vec16(S), retype(t1_ud1, BRW_REGISTER_TYPE_UW),
- stride(t2_uw1, 1, 4, 0));
- brw_set_compression_control(&func, BRW_COMPRESSION_NONE);
- brw_ADD(&func, offset(S, 1),
- retype(t1_ud1, BRW_REGISTER_TYPE_UW),
- suboffset(stride(t2_uw1, 1, 4, 0), 2));
- brw_set_compression_control(&func, BRW_COMPRESSION_COMPRESSED);
+ emit_add(vec16(S), retype(t1_ud1, BRW_REGISTER_TYPE_UW),
+ stride(t2_uw1, 1, 4, 0));
+ emit_add_8(offset(S, 1),
+ retype(t1_ud1, BRW_REGISTER_TYPE_UW),
+ suboffset(stride(t2_uw1, 1, 4, 0), 2));
break;
}
default:
* X' = (X & ~0b1011) >> 1 | (Y & 0b1) << 2 | X & 0b1 (4)
* Y' = (Y & ~0b1) << 1 | (X & 0b1000) >> 2 | (X & 0b10) >> 1
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfff4)); /* X & ~0b1011 */
- brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b1011) >> 1 */
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_SHL(&func, t2, t2, brw_imm_uw(2)); /* (Y & 0b1) << 2 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b1011) >> 1 | (Y & 0b1) << 2 */
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
- brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
- brw_AND(&func, t2, X, brw_imm_uw(8)); /* X & 0b1000 */
- brw_SHR(&func, t2, t2, brw_imm_uw(2)); /* (X & 0b1000) >> 2 */
- brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (X & 0b1000) >> 2 */
- brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
- brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
- brw_OR(&func, Yp, t1, t2);
+ emit_and(t1, X, brw_imm_uw(0xfff4)); /* X & ~0b1011 */
+ emit_shr(t1, t1, brw_imm_uw(1)); /* (X & ~0b1011) >> 1 */
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_shl(t2, t2, brw_imm_uw(2)); /* (Y & 0b1) << 2 */
+ emit_or(t1, t1, t2); /* (X & ~0b1011) >> 1 | (Y & 0b1) << 2 */
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
+ emit_shl(t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
+ emit_and(t2, X, brw_imm_uw(8)); /* X & 0b1000 */
+ emit_shr(t2, t2, brw_imm_uw(2)); /* (X & 0b1000) >> 2 */
+ emit_or(t1, t1, t2); /* (Y & ~0b1) << 1 | (X & 0b1000) >> 2 */
+ emit_and(t2, X, brw_imm_uw(2)); /* X & 0b10 */
+ emit_shr(t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
+ emit_or(Yp, t1, t2);
SWAP_XY_AND_XPYP();
} else {
/* Applying the same logic as above, but in reverse, we obtain the
* X' = (X & ~0b101) << 1 | (Y & 0b10) << 2 | (Y & 0b1) << 1 | X & 0b1
* Y' = (Y & ~0b11) >> 1 | (X & 0b100) >> 2
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfffa)); /* X & ~0b101 */
- brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b101) << 1 */
- brw_AND(&func, t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
- brw_SHL(&func, t2, t2, brw_imm_uw(2)); /* (Y & 0b10) << 2 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2 */
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_SHL(&func, t2, t2, brw_imm_uw(1)); /* (Y & 0b1) << 1 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2
+ emit_and(t1, X, brw_imm_uw(0xfffa)); /* X & ~0b101 */
+ emit_shl(t1, t1, brw_imm_uw(1)); /* (X & ~0b101) << 1 */
+ emit_and(t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
+ emit_shl(t2, t2, brw_imm_uw(2)); /* (Y & 0b10) << 2 */
+ emit_or(t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2 */
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_shl(t2, t2, brw_imm_uw(1)); /* (Y & 0b1) << 1 */
+ emit_or(t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2
| (Y & 0b1) << 1 */
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
- brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
- brw_AND(&func, t2, X, brw_imm_uw(4)); /* X & 0b100 */
- brw_SHR(&func, t2, t2, brw_imm_uw(2)); /* (X & 0b100) >> 2 */
- brw_OR(&func, Yp, t1, t2);
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
+ emit_shr(t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
+ emit_and(t2, X, brw_imm_uw(4)); /* X & 0b100 */
+ emit_shr(t2, t2, brw_imm_uw(2)); /* (X & 0b100) >> 2 */
+ emit_or(Yp, t1, t2);
SWAP_XY_AND_XPYP();
}
}
* where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
* Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
+ emit_and(t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
if (!s_is_zero) {
- brw_AND(&func, t2, S, brw_imm_uw(1)); /* S & 0b1 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b1) | (S & 0b1) */
+ emit_and(t2, S, brw_imm_uw(1)); /* S & 0b1 */
+ emit_or(t1, t1, t2); /* (X & ~0b1) | (S & 0b1) */
}
- brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b1) << 1
+ emit_shl(t1, t1, brw_imm_uw(1)); /* (X & ~0b1) << 1
| (S & 0b1) << 1 */
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
- brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
+ emit_shl(t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
if (!s_is_zero) {
- brw_AND(&func, t2, S, brw_imm_uw(2)); /* S & 0b10 */
- brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
+ emit_and(t2, S, brw_imm_uw(2)); /* S & 0b10 */
+ emit_or(t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
}
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_OR(&func, Yp, t1, t2);
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_or(Yp, t1, t2);
break;
case 8:
/* encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
* | (X & 0b1)
* Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
- brw_SHL(&func, t1, t1, brw_imm_uw(2)); /* (X & ~0b1) << 2 */
+ emit_and(t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
+ emit_shl(t1, t1, brw_imm_uw(2)); /* (X & ~0b1) << 2 */
if (!s_is_zero) {
- brw_AND(&func, t2, S, brw_imm_uw(4)); /* S & 0b100 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100) */
- brw_AND(&func, t2, S, brw_imm_uw(1)); /* S & 0b1 */
- brw_SHL(&func, t2, t2, brw_imm_uw(1)); /* (S & 0b1) << 1 */
- brw_OR(&func, t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100)
+ emit_and(t2, S, brw_imm_uw(4)); /* S & 0b100 */
+ emit_or(t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100) */
+ emit_and(t2, S, brw_imm_uw(1)); /* S & 0b1 */
+ emit_shl(t2, t2, brw_imm_uw(1)); /* (S & 0b1) << 1 */
+ emit_or(t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100)
| (S & 0b1) << 1 */
}
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
- brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
+ emit_shl(t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
if (!s_is_zero) {
- brw_AND(&func, t2, S, brw_imm_uw(2)); /* S & 0b10 */
- brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
+ emit_and(t2, S, brw_imm_uw(2)); /* S & 0b10 */
+ emit_or(t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
}
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_OR(&func, Yp, t1, t2);
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_or(Yp, t1, t2);
break;
}
SWAP_XY_AND_XPYP();
* Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
* S = (Y & 0b10) | (X & 0b10) >> 1
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfffc)); /* X & ~0b11 */
- brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b11) >> 1 */
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
- brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_OR(&func, Yp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(2)); /* Y & 0b10 */
- brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
- brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
- brw_OR(&func, S, t1, t2);
+ emit_and(t1, X, brw_imm_uw(0xfffc)); /* X & ~0b11 */
+ emit_shr(t1, t1, brw_imm_uw(1)); /* (X & ~0b11) >> 1 */
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
+ emit_shr(t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_or(Yp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(2)); /* Y & 0b10 */
+ emit_and(t2, X, brw_imm_uw(2)); /* X & 0b10 */
+ emit_shr(t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
+ emit_or(S, t1, t2);
break;
case 8:
/* decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
* Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
* S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
*/
- brw_AND(&func, t1, X, brw_imm_uw(0xfff8)); /* X & ~0b111 */
- brw_SHR(&func, t1, t1, brw_imm_uw(2)); /* (X & ~0b111) >> 2 */
- brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
- brw_OR(&func, Xp, t1, t2);
- brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
- brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
- brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
- brw_OR(&func, Yp, t1, t2);
- brw_AND(&func, t1, X, brw_imm_uw(4)); /* X & 0b100 */
- brw_AND(&func, t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
- brw_OR(&func, t1, t1, t2); /* (X & 0b100) | (Y & 0b10) */
- brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
- brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
- brw_OR(&func, S, t1, t2);
+ emit_and(t1, X, brw_imm_uw(0xfff8)); /* X & ~0b111 */
+ emit_shr(t1, t1, brw_imm_uw(2)); /* (X & ~0b111) >> 2 */
+ emit_and(t2, X, brw_imm_uw(1)); /* X & 0b1 */
+ emit_or(Xp, t1, t2);
+ emit_and(t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
+ emit_shr(t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
+ emit_and(t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
+ emit_or(Yp, t1, t2);
+ emit_and(t1, X, brw_imm_uw(4)); /* X & 0b100 */
+ emit_and(t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
+ emit_or(t1, t1, t2); /* (X & 0b100) | (Y & 0b10) */
+ emit_and(t2, X, brw_imm_uw(2)); /* X & 0b10 */
+ emit_shr(t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
+ emit_or(S, t1, t2);
break;
}
s_is_zero = false;
emit_mov(Xp_f, X);
emit_mov(Yp_f, Y);
/* Scale and offset */
- brw_MUL(&func, X_f, Xp_f, x_transform.multiplier);
- brw_MUL(&func, Y_f, Yp_f, y_transform.multiplier);
- brw_ADD(&func, X_f, X_f, x_transform.offset);
- brw_ADD(&func, Y_f, Y_f, y_transform.offset);
+ emit_mul(X_f, Xp_f, x_transform.multiplier);
+ emit_mul(Y_f, Yp_f, y_transform.multiplier);
+ emit_add(X_f, X_f, x_transform.offset);
+ emit_add(Y_f, Y_f, y_transform.offset);
if (key->blit_scaled && key->blend) {
/* Translate coordinates to lay out the samples in a rectangular grid
* roughly corresponding to sample locations.
*/
- brw_MUL(&func, X_f, X_f, brw_imm_f(key->x_scale));
- brw_MUL(&func, Y_f, Y_f, brw_imm_f(key->y_scale));
+ emit_mul(X_f, X_f, brw_imm_f(key->x_scale));
+ emit_mul(Y_f, Y_f, brw_imm_f(key->y_scale));
/* Adjust coordinates so that integers represent pixel centers rather
* than pixel edges.
*/
- brw_ADD(&func, X_f, X_f, brw_imm_f(-0.5));
- brw_ADD(&func, Y_f, Y_f, brw_imm_f(-0.5));
+ emit_add(X_f, X_f, brw_imm_f(-0.5));
+ emit_add(Y_f, Y_f, brw_imm_f(-0.5));
/* Clamp the X, Y texture coordinates to properly handle the sampling of
* texels on texture edges.
/* Store the fractional parts to be used as bilinear interpolation
* coefficients.
*/
- brw_FRC(&func, x_frac, X_f);
- brw_FRC(&func, y_frac, Y_f);
+ emit_frc(x_frac, X_f);
+ emit_frc(y_frac, Y_f);
/* Round the float coordinates down to nearest integer */
- brw_RNDD(&func, Xp_f, X_f);
- brw_RNDD(&func, Yp_f, Y_f);
- brw_MUL(&func, X_f, Xp_f, brw_imm_f(1 / key->x_scale));
- brw_MUL(&func, Y_f, Yp_f, brw_imm_f(1 / key->y_scale));
+ emit_rndd(Xp_f, X_f);
+ emit_rndd(Yp_f, Y_f);
+ emit_mul(X_f, Xp_f, brw_imm_f(1 / key->x_scale));
+ emit_mul(Y_f, Yp_f, brw_imm_f(1 / key->y_scale));
SWAP_XY_AND_XPYP();
} else if (!key->bilinear_filter) {
/* Round the float coordinates down to nearest integer by moving to
* that maxe up a pixel). So we need to multiply our X and Y coordinates
* each by 2 and then add 1.
*/
- brw_SHL(&func, t1, X, brw_imm_w(1));
- brw_SHL(&func, t2, Y, brw_imm_w(1));
- brw_ADD(&func, Xp, t1, brw_imm_w(1));
- brw_ADD(&func, Yp, t2, brw_imm_w(1));
+ emit_shl(t1, X, brw_imm_w(1));
+ emit_shl(t2, Y, brw_imm_w(1));
+ emit_add(Xp, t1, brw_imm_w(1));
+ emit_add(Yp, t2, brw_imm_w(1));
SWAP_XY_AND_XPYP();
}
* Since we have already sampled from sample 0, all we need to do is
* skip the remaining fetches and averaging if MCS is zero.
*/
- brw_CMP(&func, vec16(brw_null_reg()), BRW_CONDITIONAL_NZ,
- mcs_data, brw_imm_ud(0));
- brw_IF(&func, BRW_EXECUTE_16);
+ emit_cmp_if(BRW_CONDITIONAL_NZ, mcs_data, brw_imm_ud(0));
}
/* Do count_trailing_one_bits(i) times */
/* Scale the result down by a factor of num_samples */
/* TODO: should use a smaller loop bound for non-RGBA formats */
for (int j = 0; j < 4; ++j) {
- brw_MUL(&func, offset(texture_data[0], 2*j),
+ emit_mul(offset(texture_data[0], 2*j),
offset(vec8(texture_data[0]), 2*j),
brw_imm_f(1.0/num_samples));
}
}
if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
- brw_ENDIF(&func);
-}
-
-void
-brw_blorp_blit_program::emit_lrp(const struct brw_reg &dst,
- const struct brw_reg &src1,
- const struct brw_reg &src2,
- const struct brw_reg &src3)
-{
- brw_set_access_mode(&func, BRW_ALIGN_16);
- brw_set_compression_control(&func, BRW_COMPRESSION_NONE);
- brw_LRP(&func, dst, src1, src2, src3);
- brw_set_compression_control(&func, BRW_COMPRESSION_2NDHALF);
- brw_LRP(&func, sechalf(dst), sechalf(src1), sechalf(src2), sechalf(src3));
- brw_set_compression_control(&func, BRW_COMPRESSION_COMPRESSED);
- brw_set_access_mode(&func, BRW_ALIGN_1);
+ emit_endif();
}
void
s_is_zero = false;
/* Compute pixel coordinates */
- brw_ADD(&func, vec16(x_sample_coords), Xp_f,
+ emit_add(vec16(x_sample_coords), Xp_f,
brw_imm_f((float)(i & 0x1) * (1.0 / key->x_scale)));
- brw_ADD(&func, vec16(y_sample_coords), Yp_f,
+ emit_add(vec16(y_sample_coords), Yp_f,
brw_imm_f((float)((i >> 1) & 0x1) * (1.0 / key->y_scale)));
emit_mov(vec16(X), x_sample_coords);
emit_mov(vec16(Y), y_sample_coords);
* | 6 | 7 | | 7 | 1 |
* --------- ---------
*/
- brw_FRC(&func, vec16(t1_f), x_sample_coords);
- brw_FRC(&func, vec16(t2_f), y_sample_coords);
- brw_MUL(&func, vec16(t1_f), t1_f, brw_imm_f(key->x_scale));
- brw_MUL(&func, vec16(t2_f), t2_f, brw_imm_f(key->x_scale * key->y_scale));
- brw_ADD(&func, vec16(t1_f), t1_f, t2_f);
+ emit_frc(vec16(t1_f), x_sample_coords);
+ emit_frc(vec16(t2_f), y_sample_coords);
+ emit_mul(vec16(t1_f), t1_f, brw_imm_f(key->x_scale));
+ emit_mul(vec16(t2_f), t2_f, brw_imm_f(key->x_scale * key->y_scale));
+ emit_add(vec16(t1_f), t1_f, t2_f);
emit_mov(vec16(S), t1_f);
if (num_samples == 8) {
/* Map the sample index to a sample number */
- brw_CMP(&func, vec16(brw_null_reg()), BRW_CONDITIONAL_L,
- S, brw_imm_d(4));
- brw_IF(&func, BRW_EXECUTE_16);
+ emit_cmp_if(BRW_CONDITIONAL_L, S, brw_imm_d(4));
{
emit_mov(vec16(t2), brw_imm_d(5));
emit_if_eq_mov(S, 1, vec16(t2), 2);
emit_if_eq_mov(S, 2, vec16(t2), 4);
emit_if_eq_mov(S, 3, vec16(t2), 6);
}
- brw_ELSE(&func);
+ emit_else();
{
emit_mov(vec16(t2), brw_imm_d(0));
emit_if_eq_mov(S, 5, vec16(t2), 3);
emit_if_eq_mov(S, 6, vec16(t2), 7);
emit_if_eq_mov(S, 7, vec16(t2), 1);
}
- brw_ENDIF(&func);
+ emit_endif();
emit_mov(vec16(S), t2);
}
texel_fetch(texture_data[i]);
wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_F;
break;
case GL_UNSIGNED_INT:
- if (src_mt->format == MESA_FORMAT_S8) {
+ if (src_mt->format == MESA_FORMAT_S_UINT8) {
/* We process stencil as though it's an unsigned normalized color */
wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_F;
} else {