#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
+#define STAGE1_SCALE 4.0f
+#define STAGE2_SCALE (SCALE_FACTOR_16_TO_9 / STAGE1_SCALE)
+
struct vertex_shader_consts
{
struct vertex4f norm;
VS_O_VPOS,
VS_O_BLOCK,
VS_O_TEX,
- VS_O_START,
- VS_O_STEP
+ VS_O_START
};
static const float const_matrix[8][8] = {
};
static void *
-create_vert_shader(struct vl_idct *idct)
+create_vert_shader(struct vl_idct *idct, bool calc_src_cords)
{
struct ureg_program *shader;
- struct ureg_src norm, bs;
+ struct ureg_src scale;
struct ureg_src vrect, vpos;
- struct ureg_dst scale, t_vpos;
- struct ureg_dst o_vpos, o_block, o_tex, o_start, o_step;
+ struct ureg_dst t_vpos;
+ struct ureg_dst o_vpos, o_block, o_tex, o_start;
shader = ureg_create(TGSI_PROCESSOR_VERTEX);
if (!shader)
return NULL;
- norm = ureg_DECL_constant(shader, 0);
- bs = ureg_imm2f(shader, BLOCK_WIDTH, BLOCK_HEIGHT);
-
- scale = ureg_DECL_temporary(shader);
t_vpos = ureg_DECL_temporary(shader);
vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
- o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
- o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
- o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);
- o_step = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP);
/*
- * scale = norm * mbs;
+ * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
*
* t_vpos = vpos + vrect
* o_vpos.xy = t_vpos * scale
* o_block = vrect
* o_tex = t_pos
* o_start = vpos * scale
- * o_step = norm
*
*/
- ureg_MUL(shader, ureg_writemask(scale, TGSI_WRITEMASK_XY), norm, bs);
+ scale = ureg_imm2f(shader,
+ (float)BLOCK_WIDTH / idct->destination->width0,
+ (float)BLOCK_HEIGHT / idct->destination->height0);
ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
- ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), ureg_src(scale));
+ ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), scale);
ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
- ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
- ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
- ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, ureg_src(scale));
- ureg_MOV(shader, ureg_writemask(o_step, TGSI_WRITEMASK_XY), norm);
+ if(calc_src_cords) {
+ o_block = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK);
+ o_tex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX);
+ o_start = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_START);
+
+ ureg_MOV(shader, ureg_writemask(o_block, TGSI_WRITEMASK_XY), vrect);
+ ureg_MOV(shader, ureg_writemask(o_tex, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MUL(shader, ureg_writemask(o_start, TGSI_WRITEMASK_XY), vpos, scale);
+ }
ureg_release_temporary(shader, t_vpos);
- ureg_release_temporary(shader, scale);
ureg_END(shader);
}
static void
-matrix_mul(struct ureg_program *shader, struct ureg_dst dst,
- struct ureg_src tc[2], struct ureg_src sampler[2],
- struct ureg_src start[2], struct ureg_src step[2],
- float scale[2])
+fetch_one(struct ureg_program *shader, struct ureg_dst m[2],
+ struct ureg_src tc, struct ureg_src sampler,
+ struct ureg_src start, struct ureg_src block, float height)
{
- struct ureg_dst t_tc[2], m[2][2], tmp[2];
+ struct ureg_dst t_tc, tmp;
unsigned i, j;
- for(i = 0; i < 2; ++i) {
- t_tc[i] = ureg_DECL_temporary(shader);
- for(j = 0; j < 2; ++j)
- m[i][j] = ureg_DECL_temporary(shader);
- tmp[i] = ureg_DECL_temporary(shader);
- }
+ t_tc = ureg_DECL_temporary(shader);
+ tmp = ureg_DECL_temporary(shader);
+
+ m[0] = ureg_DECL_temporary(shader);
+ m[1] = ureg_DECL_temporary(shader);
/*
- * m[0..1][0] = ?
- * tmp[0..1] = dot4(m[0..1][0], m[0..1][1])
- * fragment = tmp[0] + tmp[1]
+ * t_tc.x = right_side ? start.x : tc.x
+ * t_tc.y = right_side ? tc.y : start.y
+ * m[0..1].xyzw = tex(t_tc++, sampler)
*/
- ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), start[0]);
- ureg_MOV(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_Y), tc[0]);
-
- ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_X), tc[1]);
- ureg_MOV(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), start[1]);
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(tc, TGSI_SWIZZLE_X));
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(start, TGSI_SWIZZLE_Y));
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Z), ureg_scalar(block, TGSI_SWIZZLE_X));
for(i = 0; i < 2; ++i) {
for(j = 0; j < 4; ++j) {
/* Nouveau and r600g can't writemask tex dst regs (yet?), do in two steps */
- ureg_TEX(shader, tmp[0], TGSI_TEXTURE_2D, ureg_src(t_tc[0]), sampler[0]);
- ureg_MOV(shader, ureg_writemask(m[i][0], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[0]), TGSI_SWIZZLE_X));
+ ureg_TEX(shader, tmp, TGSI_TEXTURE_3D, ureg_src(t_tc), sampler);
+ ureg_MOV(shader, ureg_writemask(m[i], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
- ureg_TEX(shader, tmp[1], TGSI_TEXTURE_2D, ureg_src(t_tc[1]), sampler[1]);
- ureg_MOV(shader, ureg_writemask(m[i][1], TGSI_WRITEMASK_X << j), ureg_scalar(ureg_src(tmp[1]), TGSI_SWIZZLE_X));
-
- ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_X), ureg_src(t_tc[0]), step[0]);
- ureg_ADD(shader, ureg_writemask(t_tc[1], TGSI_WRITEMASK_Y), ureg_src(t_tc[1]), step[1]);
+ if(i != 1 || j != 3) /* skip the last add */
+ ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y),
+ ureg_src(t_tc), ureg_imm1f(shader, 1.0f / height));
}
+ }
+
+ ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, tmp);
+}
+
+static void
+fetch_four(struct ureg_program *shader, struct ureg_dst m[2],
+ struct ureg_src tc, struct ureg_src sampler,
+ struct ureg_src start, bool right_side, float width)
+{
+ struct ureg_dst t_tc;
- if(scale[0] != 1.0f)
- ureg_MUL(shader, m[i][0], ureg_src(m[i][0]), ureg_scalar(ureg_imm1f(shader, scale[0]), TGSI_SWIZZLE_X));
+ t_tc = ureg_DECL_temporary(shader);
+ m[0] = ureg_DECL_temporary(shader);
+ m[1] = ureg_DECL_temporary(shader);
- if(scale[1] != 1.0f)
- ureg_MUL(shader, m[i][1], ureg_src(m[i][1]), ureg_scalar(ureg_imm1f(shader, scale[1]), TGSI_SWIZZLE_X));
+ /*
+ * t_tc.x = right_side ? start.x : tc.x
+ * t_tc.y = right_side ? tc.y : start.y
+ * m[0..1] = tex(t_tc++, sampler)
+ */
+ if(right_side) {
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_Y));
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_X));
+ } else {
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_scalar(start, TGSI_SWIZZLE_X));
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_Y), ureg_scalar(tc, TGSI_SWIZZLE_Y));
}
- ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(m[0][0]), ureg_src(m[0][1]));
- ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(m[1][0]), ureg_src(m[1][1]));
- ureg_ADD(shader, ureg_writemask(dst, TGSI_WRITEMASK_X), ureg_src(tmp[0]), ureg_src(tmp[1]));
+ ureg_TEX(shader, m[0], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);
+ ureg_ADD(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_src(t_tc), ureg_imm1f(shader, 1.0f / width));
+ ureg_TEX(shader, m[1], TGSI_TEXTURE_2D, ureg_src(t_tc), sampler);
+
+ ureg_release_temporary(shader, t_tc);
+}
+
+static void
+matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
+{
+ struct ureg_dst tmp[2];
+ unsigned i;
+
+ for(i = 0; i < 2; ++i) {
+ tmp[i] = ureg_DECL_temporary(shader);
+ }
+
+ /*
+ * tmp[0..1] = dot4(m[0][0..1], m[1][0..1])
+ * dst = tmp[0] + tmp[1]
+ */
+ ureg_DP4(shader, ureg_writemask(tmp[0], TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
+ ureg_DP4(shader, ureg_writemask(tmp[1], TGSI_WRITEMASK_X), ureg_src(l[1]), ureg_src(r[1]));
+ ureg_ADD(shader, dst, ureg_src(tmp[0]), ureg_src(tmp[1]));
for(i = 0; i < 2; ++i) {
- ureg_release_temporary(shader, t_tc[i]);
- for(j = 0; j < 2; ++j)
- ureg_release_temporary(shader, m[i][j]);
ureg_release_temporary(shader, tmp[i]);
}
}
static void *
create_transpose_frag_shader(struct vl_idct *idct)
{
+ struct pipe_resource *transpose = idct->textures.individual.transpose;
+ struct pipe_resource *intermediate = idct->textures.individual.intermediate;
+
struct ureg_program *shader;
- struct ureg_src tc[2], sampler[2];
- struct ureg_src start[2], step[2];
- struct ureg_dst fragment;
- float scale[2];
+
+ struct ureg_src block, tex, sampler[2];
+ struct ureg_src start[2];
+
+ struct ureg_dst m[2][2];
+ struct ureg_dst tmp, fragment;
shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
if (!shader)
return NULL;
- tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
- tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
+ block = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
+ tex = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_CONSTANT);
+
+ sampler[0] = ureg_DECL_sampler(shader, 0);
+ sampler[1] = ureg_DECL_sampler(shader, 1);
start[0] = ureg_imm1f(shader, 0.0f);
start[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
- step[0] = ureg_imm1f(shader, 1.0f / BLOCK_HEIGHT);
- step[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);
-
- sampler[0] = ureg_DECL_sampler(shader, 0);
- sampler[1] = ureg_DECL_sampler(shader, 2);
-
- scale[0] = 1.0f;
- scale[1] = SCALE_FACTOR_16_TO_9;
+ fetch_four(shader, m[0], block, sampler[0], start[0], false, transpose->width0);
+ fetch_one(shader, m[1], tex, sampler[1], start[1], block, intermediate->height0);
fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
- matrix_mul(shader, fragment, tc, sampler, start, step, scale);
+ tmp = ureg_DECL_temporary(shader);
+ matrix_mul(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), m[0], m[1]);
+ ureg_MUL(shader, fragment, ureg_src(tmp), ureg_imm1f(shader, STAGE2_SCALE));
+
+ ureg_release_temporary(shader, tmp);
+ ureg_release_temporary(shader, m[0][0]);
+ ureg_release_temporary(shader, m[0][1]);
+ ureg_release_temporary(shader, m[1][0]);
+ ureg_release_temporary(shader, m[1][1]);
ureg_END(shader);
static void *
create_matrix_frag_shader(struct vl_idct *idct)
{
+ struct pipe_resource *matrix = idct->textures.individual.matrix;
+ struct pipe_resource *source = idct->textures.individual.source;
+
struct ureg_program *shader;
+
struct ureg_src tc[2], sampler[2];
- struct ureg_src start[2], step[2];
- struct ureg_dst fragment;
- float scale[2];
+ struct ureg_src start[2];
+
+ struct ureg_dst l[2], r[2];
+ struct ureg_dst t_tc, tmp, fragment[BLOCK_WIDTH];
+
+ unsigned i;
shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
if (!shader)
return NULL;
+ t_tc = ureg_DECL_temporary(shader);
+ tmp = ureg_DECL_temporary(shader);
+
tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX, TGSI_INTERPOLATE_LINEAR);
tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_BLOCK, TGSI_INTERPOLATE_LINEAR);
+ sampler[0] = ureg_DECL_sampler(shader, 1);
+ sampler[1] = ureg_DECL_sampler(shader, 0);
+
start[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_START, TGSI_INTERPOLATE_CONSTANT);
start[1] = ureg_imm1f(shader, 0.0f);
- step[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_STEP, TGSI_INTERPOLATE_CONSTANT);
- step[1] = ureg_imm1f(shader, 1.0f / BLOCK_WIDTH);
-
- sampler[0] = ureg_DECL_sampler(shader, 3);
- sampler[1] = ureg_DECL_sampler(shader, 1);
-
- scale[0] = 1.0f;
- scale[1] = 1.0f;
-
- fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+ for (i = 0; i < BLOCK_WIDTH; ++i)
+ fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
+
+ fetch_four(shader, l, tc[0], sampler[0], start[0], false, source->width0);
+ ureg_MUL(shader, l[0], ureg_src(l[0]), ureg_scalar(ureg_imm1f(shader, STAGE1_SCALE), TGSI_SWIZZLE_X));
+ ureg_MUL(shader, l[1], ureg_src(l[1]), ureg_scalar(ureg_imm1f(shader, STAGE1_SCALE), TGSI_SWIZZLE_X));
+
+ for (i = 0; i < BLOCK_WIDTH; ++i) {
+ ureg_MOV(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_X), ureg_imm1f(shader, 1.0f / BLOCK_WIDTH * i));
+ fetch_four(shader, r, ureg_src(t_tc), sampler[1], start[1], true, matrix->width0);
+ matrix_mul(shader, fragment[i], l, r);
+ ureg_release_temporary(shader, r[0]);
+ ureg_release_temporary(shader, r[1]);
+ }
- matrix_mul(shader, fragment, tc, sampler, start, step, scale);
+ ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, tmp);
+ ureg_release_temporary(shader, l[0]);
+ ureg_release_temporary(shader, l[1]);
ureg_END(shader);
struct pipe_box rect =
{
0, 0, 0,
- idct->destination->width0,
- idct->destination->height0,
+ idct->textures.individual.source->width0,
+ idct->textures.individual.source->height0,
1
};
idct->tex_transfer = idct->pipe->get_transfer
(
-#if 1
idct->pipe, idct->textures.individual.source,
-#else
- idct->pipe, idct->destination,
-#endif
u_subresource(0, 0),
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
&rect
static bool
init_shaders(struct vl_idct *idct)
{
- assert(idct);
+ idct->matrix_vs = create_vert_shader(idct, true);
+ idct->matrix_fs = create_matrix_frag_shader(idct);
- assert(idct->vs = create_vert_shader(idct));
- assert(idct->transpose_fs = create_transpose_frag_shader(idct));
- assert(idct->matrix_fs = create_matrix_frag_shader(idct));
- assert(idct->eb_fs = create_empty_block_frag_shader(idct));
+ idct->transpose_vs = create_vert_shader(idct, true);
+ idct->transpose_fs = create_transpose_frag_shader(idct);
- return true;
+ idct->eb_vs = create_vert_shader(idct, false);
+ idct->eb_fs = create_empty_block_frag_shader(idct);
+
+ return
+ idct->transpose_vs != NULL && idct->transpose_fs != NULL &&
+ idct->matrix_vs != NULL && idct->matrix_fs != NULL &&
+ idct->eb_vs != NULL && idct->eb_fs != NULL;
}
static void
cleanup_shaders(struct vl_idct *idct)
{
- assert(idct);
-
- idct->pipe->delete_vs_state(idct->pipe, idct->vs);
+ idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
+
+ idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
+
+ idct->pipe->delete_vs_state(idct->pipe, idct->eb_vs);
idct->pipe->delete_fs_state(idct->pipe, idct->eb_fs);
}
struct pipe_resource template;
struct pipe_sampler_view sampler_view;
struct pipe_vertex_element vertex_elems[2];
+ unsigned i;
idct->max_blocks =
align(idct->destination->width0, BLOCK_WIDTH) / BLOCK_WIDTH *
align(idct->destination->height0, BLOCK_HEIGHT) / BLOCK_HEIGHT *
idct->destination->depth0;
- unsigned i;
-
memset(&template, 0, sizeof(struct pipe_resource));
- template.target = PIPE_TEXTURE_2D;
- template.format = PIPE_FORMAT_R16_SNORM;
template.last_level = 0;
- template.width0 = 8;
- template.height0 = 8;
template.depth0 = 1;
- template.usage = PIPE_USAGE_IMMUTABLE;
template.bind = PIPE_BIND_SAMPLER_VIEW;
template.flags = 0;
- idct->textures.individual.matrix = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
- idct->textures.individual.transpose = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
-
- template.format = idct->destination->format;
- template.width0 = idct->destination->width0;
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
+ template.width0 = idct->destination->width0 / 4;
template.height0 = idct->destination->height0;
- template.depth0 = idct->destination->depth0;
- template.usage = PIPE_USAGE_DYNAMIC;
+ template.depth0 = 1;
+ template.usage = PIPE_USAGE_STREAM;
idct->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
+ template.target = PIPE_TEXTURE_3D;
+ template.format = PIPE_FORMAT_R16_SNORM;
+ template.width0 = idct->destination->width0 / 8;
+ template.depth0 = 8;
template.usage = PIPE_USAGE_STATIC;
idct->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
for (i = 0; i < 4; ++i) {
+ if(idct->textures.all[i] == NULL)
+ return false; /* a texture failed to allocate */
+
u_sampler_view_default_template(&sampler_view, idct->textures.all[i], idct->textures.all[i]->format);
idct->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, idct->textures.all[i], &sampler_view);
}
sizeof(struct vertex2f) * 4 * idct->max_blocks
);
+ if(idct->vertex_bufs.individual.quad.buffer == NULL)
+ return false;
+
idct->vertex_bufs.individual.pos.stride = sizeof(struct vertex2f);
idct->vertex_bufs.individual.pos.max_index = 4 * idct->max_blocks - 1;
idct->vertex_bufs.individual.pos.buffer_offset = 0;
sizeof(struct vertex2f) * 4 * idct->max_blocks
);
+ if(idct->vertex_bufs.individual.pos.buffer == NULL)
+ return false;
+
/* Rect element */
vertex_elems[0].src_offset = 0;
vertex_elems[0].instance_divisor = 0;
idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);
- idct->vs_const_buf = pipe_buffer_create
- (
- idct->pipe->screen,
- PIPE_BIND_CONSTANT_BUFFER,
- sizeof(struct vertex_shader_consts)
- );
-
return true;
}
assert(idct);
- pipe_resource_reference(&idct->vs_const_buf, NULL);
-
for (i = 0; i < 4; ++i) {
pipe_sampler_view_reference(&idct->sampler_views.all[i], NULL);
pipe_resource_reference(&idct->textures.all[i], NULL);
init_constants(struct vl_idct *idct)
{
struct pipe_transfer *buf_transfer;
- struct vertex_shader_consts *vs_consts;
struct vertex2f *v;
- short *s;
-
- struct pipe_box rect =
- {
- 0, 0, 0,
- BLOCK_WIDTH,
- BLOCK_HEIGHT,
- 1
- };
- unsigned i, j, pitch;
+ unsigned i;
/* quad vectors */
v = pipe_buffer_map
for ( i = 0; i < idct->max_blocks; ++i)
memcpy(v + i * 4, &const_quad, sizeof(const_quad));
pipe_buffer_unmap(idct->pipe, idct->vertex_bufs.individual.quad.buffer, buf_transfer);
-
- /* transposed matrix */
- buf_transfer = idct->pipe->get_transfer
- (
- idct->pipe, idct->textures.individual.transpose,
- u_subresource(0, 0),
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &rect
- );
- pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);
-
- s = idct->pipe->transfer_map(idct->pipe, buf_transfer);
- for(i = 0; i < BLOCK_HEIGHT; ++i)
- for(j = 0; j < BLOCK_WIDTH; ++j)
- s[i * pitch + j] = const_matrix[j][i] * (1 << 15); // transpose
-
- idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
- idct->pipe->transfer_destroy(idct->pipe, buf_transfer);
-
- /* matrix */
- buf_transfer = idct->pipe->get_transfer
- (
- idct->pipe, idct->textures.individual.matrix,
- u_subresource(0, 0),
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &rect
- );
- pitch = buf_transfer->stride / util_format_get_blocksize(buf_transfer->resource->format);
-
- s = idct->pipe->transfer_map(idct->pipe, buf_transfer);
- for(i = 0; i < BLOCK_HEIGHT; ++i)
- for(j = 0; j < BLOCK_WIDTH; ++j)
- s[i * pitch + j] = const_matrix[i][j] * (1 << 15);
-
- idct->pipe->transfer_unmap(idct->pipe, buf_transfer);
- idct->pipe->transfer_destroy(idct->pipe, buf_transfer);
-
- /* normalisation constants */
- vs_consts = pipe_buffer_map
- (
- idct->pipe, idct->vs_const_buf,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &buf_transfer
- );
-
- vs_consts->norm.x = 1.0f / idct->destination->width0;
- vs_consts->norm.y = 1.0f / idct->destination->height0;
-
- pipe_buffer_unmap(idct->pipe, idct->vs_const_buf, buf_transfer);
}
static void
idct->num_blocks = 0;
idct->num_empty_blocks = 0;
- idct->viewport.scale[0] = idct->destination->width0;
- idct->viewport.scale[1] = idct->destination->height0;
- idct->viewport.scale[2] = 1;
- idct->viewport.scale[3] = 1;
- idct->viewport.translate[0] = 0;
- idct->viewport.translate[1] = 0;
- idct->viewport.translate[2] = 0;
- idct->viewport.translate[3] = 0;
+ idct->viewport[0].scale[0] = idct->textures.individual.intermediate->width0;
+ idct->viewport[0].scale[1] = idct->textures.individual.intermediate->height0;
- idct->fb_state.width = idct->destination->width0;
- idct->fb_state.height = idct->destination->height0;
- idct->fb_state.nr_cbufs = 1;
- idct->fb_state.zsbuf = NULL;
+ idct->viewport[1].scale[0] = idct->destination->width0;
+ idct->viewport[1].scale[1] = idct->destination->height0;
+
+ idct->fb_state[0].width = idct->textures.individual.intermediate->width0;
+ idct->fb_state[0].height = idct->textures.individual.intermediate->height0;
+
+ idct->fb_state[0].nr_cbufs = 8;
+ for(i = 0; i < 8; ++i) {
+ idct->fb_state[0].cbufs[i] = idct->pipe->screen->get_tex_surface(
+ idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, i,
+ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
+ }
+
+ idct->fb_state[1].width = idct->destination->width0;
+ idct->fb_state[1].height = idct->destination->height0;
+
+ idct->fb_state[1].nr_cbufs = 1;
+ idct->fb_state[1].cbufs[0] = idct->pipe->screen->get_tex_surface(
+ idct->pipe->screen, idct->destination, 0, 0, 0,
+ PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
+
+ for(i = 0; i < 2; ++i) {
+ idct->viewport[i].scale[2] = 1;
+ idct->viewport[i].scale[3] = 1;
+ idct->viewport[i].translate[0] = 0;
+ idct->viewport[i].translate[1] = 0;
+ idct->viewport[i].translate[2] = 0;
+ idct->viewport[i].translate[3] = 0;
+
+ idct->fb_state[i].zsbuf = NULL;
+ }
for (i = 0; i < 4; ++i) {
memset(&sampler, 0, sizeof(sampler));
{
unsigned i;
+ for(i = 0; i < 8; ++i) {
+ idct->pipe->screen->tex_surface_destroy(idct->fb_state[0].cbufs[i]);
+ }
+
+ idct->pipe->screen->tex_surface_destroy(idct->fb_state[1].cbufs[0]);
+
for (i = 0; i < 4; ++i)
idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
}
+struct pipe_resource *
+vl_idct_upload_matrix(struct pipe_context *pipe)
+{
+ struct pipe_resource template, *matrix;
+ struct pipe_transfer *buf_transfer;
+ unsigned i, j, pitch;
+ float *f;
+
+ struct pipe_box rect =
+ {
+ 0, 0, 0,
+ BLOCK_WIDTH,
+ BLOCK_HEIGHT,
+ 1
+ };
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ template.last_level = 0;
+ template.width0 = 2;
+ template.height0 = 8;
+ template.depth0 = 1;
+ template.usage = PIPE_USAGE_IMMUTABLE;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ matrix = pipe->screen->resource_create(pipe->screen, &template);
+
+ /* matrix */
+ buf_transfer = pipe->get_transfer
+ (
+ pipe, matrix,
+ u_subresource(0, 0),
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+ pitch = buf_transfer->stride / sizeof(float);
+
+ f = pipe->transfer_map(pipe, buf_transfer);
+ for(i = 0; i < BLOCK_HEIGHT; ++i)
+ for(j = 0; j < BLOCK_WIDTH; ++j)
+ f[i * pitch + j] = const_matrix[j][i]; // transpose
+
+ pipe->transfer_unmap(pipe, buf_transfer);
+ pipe->transfer_destroy(pipe, buf_transfer);
+
+ return matrix;
+}
+
bool
-vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst)
+vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe, struct pipe_resource *dst, struct pipe_resource *matrix)
{
assert(idct && pipe && dst);
idct->pipe = pipe;
+ pipe_resource_reference(&idct->textures.individual.matrix, matrix);
+ pipe_resource_reference(&idct->textures.individual.transpose, matrix);
pipe_resource_reference(&idct->destination, dst);
- init_state(idct);
-
- if(!init_shaders(idct))
+ if(!init_buffers(idct))
return false;
- if(!init_buffers(idct)) {
- cleanup_shaders(idct);
+ if(!init_shaders(idct)) {
+ cleanup_buffers(idct);
return false;
}
- idct->surfaces.intermediate = idct->pipe->screen->get_tex_surface(
- idct->pipe->screen, idct->textures.individual.intermediate, 0, 0, 0,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
-
- idct->surfaces.destination = idct->pipe->screen->get_tex_surface(
- idct->pipe->screen, idct->destination, 0, 0, 0,
- PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET);
+ init_state(idct);
init_constants(idct);
xfer_buffers_map(idct);
void
vl_idct_cleanup(struct vl_idct *idct)
{
- idct->pipe->screen->tex_surface_destroy(idct->surfaces.destination);
- idct->pipe->screen->tex_surface_destroy(idct->surfaces.intermediate);
-
cleanup_shaders(idct);
cleanup_buffers(idct);
struct vertex2f v, *v_dst;
unsigned tex_pitch;
+ unsigned nr_components;
short *texels;
unsigned i;
assert(idct);
if(block) {
- tex_pitch = idct->tex_transfer->stride / util_format_get_blocksize(idct->tex_transfer->resource->format);
+ nr_components = util_format_get_nr_components(idct->tex_transfer->resource->format);
+
+ tex_pitch = idct->tex_transfer->stride / sizeof(short);
texels = idct->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;
for (i = 0; i < BLOCK_HEIGHT; ++i)
- memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * 2);
+ memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
/* non empty blocks fills the vector buffer from left to right */
v_dst = idct->vectors + idct->num_blocks * 4;
{
xfer_buffers_unmap(idct);
- idct->pipe->set_constant_buffer(idct->pipe, PIPE_SHADER_VERTEX, 0, idct->vs_const_buf);
-
if(idct->num_blocks > 0) {
/* first stage */
- idct->fb_state.cbufs[0] = idct->surfaces.intermediate;
- idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
- idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);
+ idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[0]);
+ idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[0]);
idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
- idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
- idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
- idct->pipe->bind_vs_state(idct->pipe, idct->vs);
- idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[0]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
- idct->pipe->flush(idct->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
-
/* second stage */
- idct->fb_state.cbufs[0] = idct->surfaces.destination;
- idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
- idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);
+ idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
+ idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);
idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
- idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
- idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
- idct->pipe->bind_vs_state(idct->pipe, idct->vs);
- idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, idct->sampler_views.stage[1]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, idct->num_blocks * 4);
}
if(idct->num_empty_blocks > 0) {
/* empty block handling */
- idct->fb_state.cbufs[0] = idct->surfaces.destination;
- idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state);
- idct->pipe->set_viewport_state(idct->pipe, &idct->viewport);
+ idct->pipe->set_framebuffer_state(idct->pipe, &idct->fb_state[1]);
+ idct->pipe->set_viewport_state(idct->pipe, &idct->viewport[1]);
idct->pipe->set_vertex_buffers(idct->pipe, 2, idct->vertex_bufs.all);
idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
- idct->pipe->set_fragment_sampler_views(idct->pipe, 4, idct->sampler_views.all);
- idct->pipe->bind_fragment_sampler_states(idct->pipe, 4, idct->samplers.all);
- idct->pipe->bind_vs_state(idct->pipe, idct->vs);
+ idct->pipe->bind_vs_state(idct->pipe, idct->eb_vs);
idct->pipe->bind_fs_state(idct->pipe, idct->eb_fs);
util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS,
idct->num_empty_blocks * 4);
}
- idct->pipe->flush(idct->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
-
idct->num_blocks = 0;
idct->num_empty_blocks = 0;
xfer_buffers_map(idct);