PIPE_VIDEO_CAP_NPOT_TEXTURES
);
- buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, MACROBLOCK_WIDTH);
- buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, MACROBLOCK_HEIGHT);
+ buffer_width = pot_buffers ? util_next_power_of_two(width) : align(width, VL_MACROBLOCK_WIDTH);
+ buffer_height = pot_buffers ? util_next_power_of_two(height) : align(height, VL_MACROBLOCK_HEIGHT);
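/* For example, assuming a 1920x1080 stream: with pot_buffers the backing
 * textures end up 2048x2048, otherwise the size is merely rounded up to the
 * macroblock grid, i.e. 1920x1088. */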
switch (u_reduce_video_profile(profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
#define vl_defines_h
/* constants usually used with all known codecs */
-#define MACROBLOCK_WIDTH 16
-#define MACROBLOCK_HEIGHT 16
+#define VL_MACROBLOCK_WIDTH 16
+#define VL_MACROBLOCK_HEIGHT 16
-#define BLOCK_WIDTH 8
-#define BLOCK_HEIGHT 8
+#define VL_BLOCK_WIDTH 8
+#define VL_BLOCK_HEIGHT 8
#define VL_MAX_PLANES 3
#define VL_MAX_REF_FRAMES 2
o_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);
/*
- * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
+ * scale = (VL_BLOCK_WIDTH, VL_BLOCK_HEIGHT) / (dst.width, dst.height)
*
- * t_vpos = vpos + 7 / BLOCK_WIDTH
+ * t_vpos = vpos + 7 / VL_BLOCK_WIDTH
* o_vpos.xy = t_vpos * scale
*
* o_addr = calc_addr(...)
*/
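/* Concretely, with VL_BLOCK_WIDTH = VL_BLOCK_HEIGHT = 8 and, say, a 64x64
 * IDCT buffer, scale comes out as (0.125, 0.125), i.e. one block step per
 * 1/8th of the destination. */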
scale = ureg_imm2f(shader,
- (float)BLOCK_WIDTH / idct->buffer_width,
- (float)BLOCK_HEIGHT / idct->buffer_height);
+ (float)VL_BLOCK_WIDTH / idct->buffer_width,
+ (float)VL_BLOCK_HEIGHT / idct->buffer_height);
ureg_MAD(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), vpos, scale, scale);
ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), ureg_imm1f(shader, 1.0f));
o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);
/*
- * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
+ * scale = (VL_BLOCK_WIDTH, VL_BLOCK_HEIGHT) / (dst.width, dst.height)
*
* t_vpos = vpos + vrect
* o_vpos.xy = t_vpos * scale
*/
scale = ureg_imm2f(shader,
- (float)BLOCK_WIDTH / idct->buffer_width,
- (float)BLOCK_HEIGHT / idct->buffer_height);
+ (float)VL_BLOCK_WIDTH / idct->buffer_width,
+ (float)VL_BLOCK_HEIGHT / idct->buffer_height);
ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);
calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
- calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
+ calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, VL_BLOCK_WIDTH / 4);
ureg_release_temporary(shader, t_tex);
ureg_release_temporary(shader, t_start);
for (i = 0; i < idct->nr_of_render_targets; ++i) {
struct ureg_src s_addr[2];
- increment_addr(shader, r, r_addr, true, true, i - (signed)idct->nr_of_render_targets / 2, BLOCK_HEIGHT);
+ increment_addr(shader, r, r_addr, true, true, i - (signed)idct->nr_of_render_targets / 2, VL_BLOCK_HEIGHT);
s_addr[0] = ureg_src(r[0]);
s_addr[1] = ureg_src(r[1]);
o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, first_output + VS_O_R_ADDR1);
scale = ureg_imm2f(shader,
- (float)BLOCK_WIDTH / idct->buffer_width,
- (float)BLOCK_HEIGHT / idct->buffer_height);
+ (float)VL_BLOCK_WIDTH / idct->buffer_width,
+ (float)VL_BLOCK_HEIGHT / idct->buffer_height);
ureg_MUL(shader, ureg_writemask(tex, TGSI_WRITEMASK_Z),
ureg_scalar(vrect, TGSI_SWIZZLE_X),
- ureg_imm1f(shader, BLOCK_WIDTH / idct->nr_of_render_targets));
+ ureg_imm1f(shader, VL_BLOCK_WIDTH / idct->nr_of_render_targets));
ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);
- calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
+ calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, VL_BLOCK_WIDTH / 4);
calc_addr(shader, o_r_addr, ureg_src(tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
ureg_MOV(shader, ureg_writemask(o_r_addr[0], TGSI_WRITEMASK_Z), ureg_src(tex));
struct pipe_box rect =
{
0, 0, 0,
- BLOCK_WIDTH / 4,
- BLOCK_HEIGHT,
+ VL_BLOCK_WIDTH / 4,
+ VL_BLOCK_HEIGHT,
1
};
if (!f)
goto error_map;
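/* The box above is only VL_BLOCK_WIDTH / 4 texels wide, yet each row below
 * gets VL_BLOCK_WIDTH floats written to it; presumably the matrix texture
 * packs four coefficients into every texel. */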
- for(i = 0; i < BLOCK_HEIGHT; ++i)
- for(j = 0; j < BLOCK_WIDTH; ++j)
+ for(i = 0; i < VL_BLOCK_HEIGHT; ++i)
+ for(j = 0; j < VL_BLOCK_WIDTH; ++j)
// transpose and scale
f[i * pitch + j] = ((const float (*)[8])const_matrix)[j][i] * scale;
o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
/*
- * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
+ * block_scale = (VL_MACROBLOCK_WIDTH, VL_MACROBLOCK_HEIGHT) / (dst.width, dst.height)
*
* t_vpos = (vpos + vrect) * block_scale
* o_vpos.xy = t_vpos
vmv[1] = ureg_DECL_vs_input(shader, VS_I_MV_BOTTOM);
t_vpos = calc_position(r, shader, ureg_imm2f(shader,
- (float)MACROBLOCK_WIDTH / r->buffer_width,
- (float)MACROBLOCK_HEIGHT / r->buffer_height)
+ (float)VL_MACROBLOCK_WIDTH / r->buffer_width,
+ (float)VL_MACROBLOCK_HEIGHT / r->buffer_height)
);
o_vmv[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
{
const float y_scale =
r->buffer_height / 2 *
- r->macroblock_size / MACROBLOCK_HEIGHT;
+ r->macroblock_size / VL_MACROBLOCK_HEIGHT;
struct ureg_program *shader;
struct ureg_src tc[2], sampler;
struct ureg_dst o_vpos, o_flags;
struct vertex2f scale = {
- (float)BLOCK_WIDTH / r->buffer_width * MACROBLOCK_WIDTH / r->macroblock_size,
- (float)BLOCK_HEIGHT / r->buffer_height * MACROBLOCK_HEIGHT / r->macroblock_size
+ (float)VL_BLOCK_WIDTH / r->buffer_width * VL_MACROBLOCK_WIDTH / r->macroblock_size,
+ (float)VL_BLOCK_HEIGHT / r->buffer_height * VL_MACROBLOCK_HEIGHT / r->macroblock_size
};
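/* Folding the factors together: for the luma plane (macroblock_size ==
 * VL_MACROBLOCK_HEIGHT, see the vl_mc_init calls further down) this is just
 * VL_BLOCK_WIDTH / buffer_width, while for the chroma planes
 * (macroblock_size == VL_BLOCK_HEIGHT) it becomes
 * VL_MACROBLOCK_WIDTH / buffer_width. */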
unsigned label;
ureg_scalar(vpos, TGSI_SWIZZLE_Z), ureg_imm1f(shader, 0.5f));
ureg_MOV(shader, ureg_writemask(o_flags, TGSI_WRITEMASK_W), ureg_imm1f(shader, -1.0f));
- if (r->macroblock_size == MACROBLOCK_HEIGHT) { //TODO
+ if (r->macroblock_size == VL_MACROBLOCK_HEIGHT) { //TODO
ureg_IF(shader, ureg_scalar(vpos, TGSI_SWIZZLE_W), &label);
ureg_CMP(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY),
/*rs_state.sprite_coord_enable */
rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
rs_state.point_quad_rasterization = true;
- rs_state.point_size = BLOCK_WIDTH;
+ rs_state.point_size = VL_BLOCK_WIDTH;
rs_state.gl_rasterization_rules = true;
rs_state.depth_clip = 1;
r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 1, &renderer->sampler_ref);
util_draw_arrays_instanced(renderer->pipe, PIPE_PRIM_QUADS, 0, 4, 0,
- renderer->buffer_width / MACROBLOCK_WIDTH *
- renderer->buffer_height / MACROBLOCK_HEIGHT);
+ renderer->buffer_width / VL_MACROBLOCK_WIDTH *
+ renderer->buffer_height / VL_MACROBLOCK_HEIGHT);
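/* One instance per macroblock of the target surface, i.e.
 * (buffer_width / 16) * (buffer_height / 16) quads in total. */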
buffer->surface_cleared = true;
}
memset(&res_tmpl, 0, sizeof(res_tmpl));
res_tmpl.target = PIPE_TEXTURE_2D;
res_tmpl.format = dec->zscan_source_format;
- res_tmpl.width0 = dec->blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;
+ res_tmpl.width0 = dec->blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
res_tmpl.height0 = align(dec->num_blocks, dec->blocks_per_line) / dec->blocks_per_line;
res_tmpl.depth0 = 1;
res_tmpl.array_size = 1;
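/* With this layout every texture row holds blocks_per_line blocks' worth of
 * coefficients (VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT = 64 each), and height0 is
 * the number of rows needed to cover num_blocks. */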
return NULL;
if (!vl_vb_init(&buffer->vertex_stream, dec->base.context,
- dec->base.width / MACROBLOCK_WIDTH,
- dec->base.height / MACROBLOCK_HEIGHT))
+ dec->base.width / VL_MACROBLOCK_WIDTH,
+ dec->base.height / VL_MACROBLOCK_HEIGHT))
goto error_vertex_buffer;
if (!init_mc_buffer(dec, buffer))
unsigned width, unsigned height, unsigned max_references,
bool expect_chunked_decode)
{
- const unsigned block_size_pixels = BLOCK_WIDTH * BLOCK_HEIGHT;
+ const unsigned block_size_pixels = VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
const struct format_config *format_config;
struct vl_mpeg12_decoder *dec;
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
- dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ dec->width_in_macroblocks = align(dec->base.width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
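/* For instance, assuming a 1920x1080 bitstream: block_size_pixels is 64, so
 * blocks_per_line = MAX2(2048 / 64, 4) = 32, num_blocks = 32400 and
 * width_in_macroblocks = 120. */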
dec->expect_chunked_decode = expect_chunked_decode;
/* TODO: Implement 422, 444 */
dec->quads = vl_vb_upload_quads(dec->base.context);
dec->pos = vl_vb_upload_pos(
dec->base.context,
- dec->base.width / MACROBLOCK_WIDTH,
- dec->base.height / MACROBLOCK_HEIGHT
+ dec->base.width / VL_MACROBLOCK_WIDTH,
+ dec->base.height / VL_MACROBLOCK_HEIGHT
);
dec->ves_ycbcr = vl_vb_get_ves_ycbcr(dec->base.context);
}
if (!vl_mc_init(&dec->mc_y, dec->base.context, dec->base.width, dec->base.height,
- MACROBLOCK_HEIGHT, format_config->mc_scale,
+ VL_MACROBLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_y;
// TODO
if (!vl_mc_init(&dec->mc_c, dec->base.context, dec->base.width, dec->base.height,
- BLOCK_HEIGHT, format_config->mc_scale,
+ VL_BLOCK_HEIGHT, format_config->mc_scale,
mc_vert_shader_callback, mc_frag_shader_callback, dec))
goto error_mc_c;
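/* The luma MC stage is set up for full 16-line macroblocks, the chroma one
 * for 8-line blocks, consistent with the 4:2:0-only handling noted in the
 * TODO above. */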
templat = *tmpl;
templat.width = pot_buffers ? util_next_power_of_two(tmpl->width)
- : align(tmpl->width, MACROBLOCK_WIDTH);
+ : align(tmpl->width, VL_MACROBLOCK_WIDTH);
templat.height = pot_buffers ? util_next_power_of_two(tmpl->height)
- : align(tmpl->height, MACROBLOCK_HEIGHT);
+ : align(tmpl->height, VL_MACROBLOCK_HEIGHT);
if (tmpl->interlaced)
templat.height /= 2;
o_vtex = MALLOC(zscan->num_channels * sizeof(struct ureg_dst));
scale = ureg_imm2f(shader,
- (float)BLOCK_WIDTH / zscan->buffer_width,
- (float)BLOCK_HEIGHT / zscan->buffer_height);
+ (float)VL_BLOCK_WIDTH / zscan->buffer_width,
+ (float)VL_BLOCK_HEIGHT / zscan->buffer_height);
vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
for (i = 0; i < zscan->num_channels; ++i) {
ureg_ADD(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y),
- ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * BLOCK_WIDTH) * (i - (signed)zscan->num_channels / 2)));
+ ureg_imm1f(shader, 1.0f / (zscan->blocks_per_line * VL_BLOCK_WIDTH)
+ * (i - (signed)zscan->num_channels / 2)));
ureg_MAD(shader, ureg_writemask(o_vtex[i], TGSI_WRITEMASK_X), vrect,
ureg_imm1f(shader, 1.0f / zscan->blocks_per_line), ureg_src(tmp));
struct pipe_sampler_view *
vl_zscan_layout(struct pipe_context *pipe, const int layout[64], unsigned blocks_per_line)
{
- const unsigned total_size = blocks_per_line * BLOCK_WIDTH * BLOCK_HEIGHT;
+ const unsigned total_size = blocks_per_line * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
int patched_layout[64];
struct pipe_box rect =
{
0, 0, 0,
- BLOCK_WIDTH * blocks_per_line,
- BLOCK_HEIGHT,
+ VL_BLOCK_WIDTH * blocks_per_line,
+ VL_BLOCK_HEIGHT,
1
};
memset(&res_tmpl, 0, sizeof(res_tmpl));
res_tmpl.target = PIPE_TEXTURE_2D;
res_tmpl.format = PIPE_FORMAT_R32_FLOAT;
- res_tmpl.width0 = BLOCK_WIDTH * blocks_per_line;
- res_tmpl.height0 = BLOCK_HEIGHT;
+ res_tmpl.width0 = VL_BLOCK_WIDTH * blocks_per_line;
+ res_tmpl.height0 = VL_BLOCK_HEIGHT;
res_tmpl.depth0 = 1;
res_tmpl.array_size = 1;
res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
goto error_map;
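/* Fill the layout texture: each position inside a block stores the scan
 * layout's source index, offset by the block's position in the line and
 * normalized to [0, 1) by total_size; presumably the scan shader uses this
 * as a lookup table when reordering coefficients. */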
for (i = 0; i < blocks_per_line; ++i)
- for (y = 0; y < BLOCK_HEIGHT; ++y)
- for (x = 0; x < BLOCK_WIDTH; ++x) {
- float addr = patched_layout[x + y * BLOCK_WIDTH] +
- i * BLOCK_WIDTH * BLOCK_HEIGHT;
+ for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
+ for (x = 0; x < VL_BLOCK_WIDTH; ++x) {
+ float addr = patched_layout[x + y * VL_BLOCK_WIDTH] +
+ i * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
addr /= total_size;
- f[i * BLOCK_WIDTH + y * pitch + x] = addr;
+ f[i * VL_BLOCK_WIDTH + y * pitch + x] = addr;
}
pipe->transfer_unmap(pipe, buf_transfer);
memset(&res_tmpl, 0, sizeof(res_tmpl));
res_tmpl.target = PIPE_TEXTURE_3D;
res_tmpl.format = PIPE_FORMAT_R8_UNORM;
- res_tmpl.width0 = BLOCK_WIDTH * zscan->blocks_per_line;
- res_tmpl.height0 = BLOCK_HEIGHT;
+ res_tmpl.width0 = VL_BLOCK_WIDTH * zscan->blocks_per_line;
+ res_tmpl.height0 = VL_BLOCK_HEIGHT;
res_tmpl.depth0 = 2;
res_tmpl.array_size = 1;
res_tmpl.usage = PIPE_USAGE_IMMUTABLE;
struct pipe_box rect =
{
0, 0, intra ? 1 : 0,
- BLOCK_WIDTH,
- BLOCK_HEIGHT,
+ VL_BLOCK_WIDTH,
+ VL_BLOCK_HEIGHT,
1
};
goto error_map;
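/* The 8x8 quant matrix is simply replicated blocks_per_line times along the
 * row; the z coordinate of the box above selects between the two layers,
 * non-intra (z = 0) and intra (z = 1). */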
for (i = 0; i < zscan->blocks_per_line; ++i)
- for (y = 0; y < BLOCK_HEIGHT; ++y)
- for (x = 0; x < BLOCK_WIDTH; ++x)
- data[i * BLOCK_WIDTH + y * pitch + x] = matrix[x + y * BLOCK_WIDTH];
+ for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
+ for (x = 0; x < VL_BLOCK_WIDTH; ++x)
+ data[i * VL_BLOCK_WIDTH + y * pitch + x] = matrix[x + y * VL_BLOCK_WIDTH];
pipe->transfer_unmap(pipe, buf_transfer);