Previously, we computed the sampler counts while generating the SAMPLER_STATE
table. By computing them earlier, we should be able to shorten a bunch of
loops.
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Paul Berry <stereotype441@gmail.com>
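
A quick sketch of the reasoning behind using the highest set bit rather than
a bit-count (illustrative only, not part of the patch; fls_u32() is a
hypothetical stand-in for Mesa's _mesa_fls()): ARB programs use the texture
unit number as the sampler index, so the SAMPLER_STATE table needs entries up
to the highest index used, even when lower indices are unused.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for _mesa_fls(): 1-based position of the highest
 * set bit, or 0 when no bits are set. */
static unsigned fls_u32(uint32_t v)
{
   return v ? 32 - __builtin_clz(v) : 0;
}

int main(void)
{
   /* An ARB program that samples only from texture unit 3. */
   uint32_t samplers_used = 1u << 3;

   /* A bit-count says one sampler is in use... */
   unsigned bit_count = __builtin_popcount(samplers_used);  /* 1 */
   /* ...but the table still needs entries for indices 0..3. */
   unsigned sampler_count = fls_u32(samplers_used);         /* 4 */

   printf("popcount = %u, sampler_count = %u\n", bit_count, sampler_count);
   return 0;
}
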
/** Upload a SAMPLER_STATE table. */
void (*upload_sampler_state_table)(struct brw_context *brw,
struct gl_program *prog,
- uint32_t *sampler_count,
+ uint32_t sampler_count,
uint32_t *sst_offset,
uint32_t *sdc_offset);
if (ctx->NewState)
_mesa_update_state( ctx );
+ /* Find the highest sampler unit used by each shader program. A bit-count
+ * won't work since ARB programs use the texture unit number as the sampler
+ * index.
+ */
+ brw->wm.sampler_count = _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
+ brw->vs.sampler_count = _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);
+
/* We have to validate the textures *before* checking for fallbacks;
* otherwise, the software fallback won't be able to rely on the
* texture state, the firstLevel and lastLevel fields won't be
static void
brw_upload_sampler_state_table(struct brw_context *brw,
struct gl_program *prog,
- uint32_t *sampler_count,
+ uint32_t sampler_count,
uint32_t *sst_offset,
uint32_t *sdc_offset)
{
GLbitfield SamplersUsed = prog->SamplersUsed;
- /* ARB programs use the texture unit number as the sampler index, so we
- * need to find the highest unit used. A bit-count will not work.
- */
- *sampler_count = _mesa_fls(SamplersUsed);
-
- if (*sampler_count == 0)
+ if (sampler_count == 0)
return;
samplers = brw_state_batch(brw, AUB_TRACE_SAMPLER_STATE,
- *sampler_count * sizeof(*samplers),
+ sampler_count * sizeof(*samplers),
- memset(samplers, 0, *sampler_count * sizeof(*samplers));
+ memset(samplers, 0, sampler_count * sizeof(*samplers));
- for (unsigned s = 0; s < *sampler_count; s++) {
+ for (unsigned s = 0; s < sampler_count; s++) {
if (SamplersUsed & (1 << s)) {
const unsigned unit = prog->SamplerUnits[s];
if (ctx->Texture.Unit[unit]._ReallyEnabled)
/* BRW_NEW_FRAGMENT_PROGRAM */
struct gl_program *fs = (struct gl_program *) brw->fragment_program;
brw->vtbl.upload_sampler_state_table(brw, fs,
- &brw->wm.sampler_count,
+ brw->wm.sampler_count,
&brw->wm.sampler_offset,
brw->wm.sdc_offset);
}
/* BRW_NEW_VERTEX_PROGRAM */
struct gl_program *vs = (struct gl_program *) brw->vertex_program;
brw->vtbl.upload_sampler_state_table(brw, vs,
- &brw->vs.sampler_count,
+ brw->vs.sampler_count,
&brw->vs.sampler_offset,
brw->vs.sdc_offset);
}
static void
gen7_upload_sampler_state_table(struct brw_context *brw,
struct gl_program *prog,
- uint32_t *sampler_count,
+ uint32_t sampler_count,
uint32_t *sst_offset,
uint32_t *sdc_offset)
{
GLbitfield SamplersUsed = prog->SamplersUsed;
- *sampler_count = _mesa_fls(SamplersUsed);
-
- if (*sampler_count == 0)
+ if (sampler_count == 0)
return;
samplers = brw_state_batch(brw, AUB_TRACE_SAMPLER_STATE,
- *sampler_count * sizeof(*samplers),
+ sampler_count * sizeof(*samplers),
- memset(samplers, 0, *sampler_count * sizeof(*samplers));
+ memset(samplers, 0, sampler_count * sizeof(*samplers));
- for (unsigned s = 0; s < *sampler_count; s++) {
+ for (unsigned s = 0; s < sampler_count; s++) {
if (SamplersUsed & (1 << s)) {
const unsigned unit = prog->SamplerUnits[s];
if (ctx->Texture.Unit[unit]._ReallyEnabled)