struct qreg s = src[0 * 4 + 0];
struct qreg t = src[0 * 4 + 1];
+ struct qreg r = src[0 * 4 + 2];
uint32_t unit = tgsi_inst->Src[1].Register.Index;
struct qreg proj = c->undef;
t = qir_FMUL(c, t, proj);
}
+ struct qreg texture_u[] = {
+ add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
+ add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
+ add_uniform(c, QUNIFORM_CONSTANT, 0),
+ add_uniform(c, QUNIFORM_CONSTANT, 0),
+ };
+ uint32_t next_texture_u = 0;
+
/* There is no native support for GL texture rectangle coordinates, so
* we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
* 1]).
get_temp_for_uniform(c,
QUNIFORM_TEXRECT_SCALE_Y,
unit));
- }
+ } else if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
+ tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) {
+ struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
+ struct qreg rcp_ma = qir_RCP(c, ma);
+ s = qir_FMUL(c, s, rcp_ma);
+ t = qir_FMUL(c, t, rcp_ma);
+ r = qir_FMUL(c, r, rcp_ma);
- qir_TEX_T(c, t, add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit));
+ texture_u[2] = add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2, unit);
- struct qreg sampler_p1 = add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1,
- unit);
- if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
- qir_TEX_B(c, src[0 * 4 + 3], sampler_p1);
- qir_TEX_S(c, s, add_uniform(c, QUNIFORM_CONSTANT, 0));
- } else {
- qir_TEX_S(c, s, sampler_p1);
+ qir_TEX_R(c, r, texture_u[next_texture_u++]);
}
+ qir_TEX_T(c, t, texture_u[next_texture_u++]);
+
+ if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB)
+ qir_TEX_B(c, src[0 * 4 + 3], texture_u[next_texture_u++]);
+
+ qir_TEX_S(c, s, texture_u[next_texture_u++]);
+
c->num_texture_samples++;
struct qreg r4 = qir_TEX_RESULT(c);
struct pipe_sampler_view *texture = texstate->textures[unit];
struct vc4_resource *rsc = vc4_resource(texture->texture);
+ bool is_cube = texture->target == PIPE_TEXTURE_CUBE;
+
cl_reloc(vc4, &vc4->uniforms, rsc->bo,
rsc->slices[0].offset | texture->u.tex.last_level |
+ is_cube << 9 |
((rsc->vc4_format & 7) << 4));
}
(translate_wrap(sampler->wrap_s) << 0));
}
+/* Emits the texture config parameter P2 uniform for texture unit "unit".
+ * The value 1 in the top two bits (1 << 30) marks this P2 word as
+ * carrying the cube map stride; the low bits hold the page-aligned
+ * per-face stride in bytes (the validator extracts p2 & 0x3ffff000
+ * when the top-two-bit type field equals 1).
+ */
+static void
+write_texture_p2(struct vc4_context *vc4,
+ struct vc4_texture_stateobj *texstate,
+ uint32_t unit)
+{
+ struct pipe_sampler_view *texture = texstate->textures[unit];
+ struct vc4_resource *rsc = vc4_resource(texture->texture);
+
+ cl_u32(&vc4->uniforms, (1 << 30) | rsc->cube_map_stride);
+}
+
static uint32_t
get_texrect_scale(struct vc4_texture_stateobj *texstate,
enum quniform_contents contents,
write_texture_p1(vc4, texstate, uinfo->data[i]);
break;
+ case QUNIFORM_TEXTURE_CONFIG_P2:
+ write_texture_p2(vc4, texstate, uinfo->data[i]);
+ break;
+
case QUNIFORM_TEXRECT_SCALE_X:
case QUNIFORM_TEXRECT_SCALE_Y:
cl_u32(&vc4->uniforms,
if (trans->map) {
if (ptrans->usage & PIPE_TRANSFER_WRITE) {
- vc4_store_tiled_image(rsc->bo->map + slice->offset,
+ vc4_store_tiled_image(rsc->bo->map + slice->offset +
+ ptrans->box.z * rsc->cube_map_stride,
slice->stride,
trans->map, ptrans->stride,
slice->tiling, rsc->cpp,
trans->map = malloc(ptrans->stride * ptrans->box.height);
if (usage & PIPE_TRANSFER_READ) {
vc4_load_tiled_image(trans->map, ptrans->stride,
- buf + slice->offset,
+ buf + slice->offset +
+ box->z * rsc->cube_map_stride,
slice->stride,
slice->tiling, rsc->cpp,
&ptrans->box);
return buf + slice->offset +
box->y / util_format_get_blockheight(format) * ptrans->stride +
box->x / util_format_get_blockwidth(format) * rsc->cpp +
- box->z * slice->size;
+ box->z * rsc->cube_map_stride;
}
struct pipe_resource *prsc = &rsc->base.b;
uint32_t width = prsc->width0;
uint32_t height = prsc->height0;
- uint32_t depth = prsc->depth0;
uint32_t offset = 0;
uint32_t utile_w = vc4_utile_width(rsc->cpp);
uint32_t utile_h = vc4_utile_height(rsc->cpp);
slice->stride = level_width * rsc->cpp;
slice->size = level_height * slice->stride;
- /* Note, since we have cubes but no 3D, depth is invariant
- * with miplevel.
- */
- offset += slice->size * depth;
+ offset += slice->size;
}
/* The texture base pointer that has to point to level 0 doesn't have
for (int i = 0; i <= prsc->last_level; i++)
rsc->slices[i].offset += page_align_offset;
}
+
+ /* Cube map faces appear as whole miptrees at a page-aligned offset
+ * from the first face's miptree.
+ */
+ if (prsc->target == PIPE_TEXTURE_CUBE) {
+ rsc->cube_map_stride = align(rsc->slices[0].offset +
+ rsc->slices[0].size, 4096);
+ }
}
static struct vc4_resource *
rsc->bo = vc4_bo_alloc(vc4_screen(pscreen),
rsc->slices[0].offset +
- rsc->slices[0].size * prsc->depth0,
+ rsc->slices[0].size +
+ rsc->cube_map_stride * (prsc->array_size - 1),
"resource");
if (!rsc->bo)
goto fail;
struct drm_gem_cma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
+ uint32_t p2 = (sample->p_offset[2] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
+ uint32_t p3 = (sample->p_offset[3] != ~0 ?
+ *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
uint32_t offset = p0 & ~0xfff;
uint32_t miplevels = (p0 & 15);
uint32_t height = (p1 >> 20) & 2047;
uint32_t cpp, tiling_format, utile_w, utile_h;
uint32_t i;
+ uint32_t cube_map_stride = 0;
enum vc4_texture_data_type type;
if (width == 0)
height = 2048;
if (p0 & (1 << 9)) {
- DRM_ERROR("Cube maps unsupported\n");
- return false;
+ /* Cube map enabled: exactly one of P2/P3 must carry the cube
+ * map stride (top-two-bit type field == 1).
+ */
+ if ((p2 & (3 << 30)) == (1 << 30))
+ cube_map_stride = p2 & 0x3ffff000;
+ if ((p3 & (3 << 30)) == (1 << 30)) {
+ if (cube_map_stride) {
+ DRM_ERROR("Cube map stride set twice\n");
+ /* The enclosing function returns bool (see the
+ * surrounding "return false" error paths);
+ * -EINVAL would convert to true and falsely
+ * report success.
+ */
+ return false;
+ }
+
+ cube_map_stride = p3 & 0x3ffff000;
+ }
+ if (!cube_map_stride) {
+ DRM_ERROR("Cube map stride not set\n");
+ return false;
+ }
}
type = ((p0 >> 4) & 15) | ((p1 >> 31) << 4);
if (!vc4_use_bo(exec, texture_handle_index, VC4_MODE_RENDER, &tex))
return false;
- if (!check_tex_size(exec, tex, offset, tiling_format,
- width, height, cpp)) {
+ if (!check_tex_size(exec, tex, offset + cube_map_stride * 5,
+ tiling_format, width, height, cpp)) {
return false;
}