This fixes the gl_PointSize transform feedback test.
Point size clamping should happen at the rasterizer stage,
i.e. after the vertex and geometry shaders and transform feedback.
Drivers are expected to do this themselves.
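
For illustration only (not part of this patch): a minimal sketch of the kind of
clamp a driver's rasterizer stage is expected to apply, using limits analogous
to ctx->Const.MinPointSize / ctx->Const.MaxPointSize. The helper name and
parameters below are hypothetical; the point is that the value written by the
shader (and captured by transform feedback) stays unclamped.

   /* Hypothetical helper, not from Mesa: clamp the shader-written point
    * size to the implementation-dependent range just before rasterization.
    */
   static float
   rast_clamp_point_size(float shader_psize, float min_size, float max_size)
   {
      if (shader_psize < min_size)
         return min_size;
      if (shader_psize > max_size)
         return max_size;
      return shader_psize;
   }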
value[3] = ctx->Point.Threshold;
}
return;
- case STATE_POINT_SIZE_IMPL_CLAMP:
- {
- /* for implementation clamp only in vs */
- GLfloat minImplSize;
- GLfloat maxImplSize;
- if (ctx->Point.PointSprite) {
- minImplSize = ctx->Const.MinPointSizeAA;
- maxImplSize = ctx->Const.MaxPointSize;
- }
- else if (ctx->Point.SmoothFlag || ctx->Multisample._Enabled) {
- minImplSize = ctx->Const.MinPointSizeAA;
- maxImplSize = ctx->Const.MaxPointSizeAA;
- }
- else {
- minImplSize = ctx->Const.MinPointSize;
- maxImplSize = ctx->Const.MaxPointSize;
- }
- value[0] = ctx->Point.Size;
- value[1] = minImplSize;
- value[2] = maxImplSize;
- value[3] = ctx->Point.Threshold;
- }
- return;
case STATE_LIGHT_SPOT_DIR_NORMALIZED:
{
/* here, state[2] is the light number */
case STATE_FOG_PARAMS_OPTIMIZED:
return _NEW_FOG;
case STATE_POINT_SIZE_CLAMPED:
- case STATE_POINT_SIZE_IMPL_CLAMP:
return _NEW_POINT | _NEW_MULTISAMPLE;
case STATE_LIGHT_SPOT_DIR_NORMALIZED:
case STATE_LIGHT_POSITION:
case STATE_POINT_SIZE_CLAMPED:
append(dst, "pointSizeClamped");
break;
- case STATE_POINT_SIZE_IMPL_CLAMP:
- append(dst, "pointSizeImplClamp");
- break;
case STATE_LIGHT_SPOT_DIR_NORMALIZED:
append(dst, "lightSpotDirNormalized");
break;
STATE_TEXRECT_SCALE,
STATE_FOG_PARAMS_OPTIMIZED, /* for faster fog calc */
STATE_POINT_SIZE_CLAMPED, /* includes implementation dependent size clamp */
- STATE_POINT_SIZE_IMPL_CLAMP, /* for implementation clamp only in vs */
STATE_LIGHT_SPOT_DIR_NORMALIZED, /* pre-normalized spot dir */
STATE_LIGHT_POSITION, /* object vs eye space */
STATE_LIGHT_POSITION_NORMALIZED, /* object vs eye space */
struct ureg_src samplers[PIPE_MAX_SAMPLERS];
struct ureg_src systemValues[SYSTEM_VALUE_MAX];
- /* Extra info for handling point size clamping in vertex shader */
- struct ureg_dst pointSizeResult; /**< Actual point size output register */
- struct ureg_src pointSizeConst; /**< Point size range constant register */
- GLint pointSizeOutIndex; /**< Temp point size output register */
- GLboolean prevInstWrotePointSize;
-
const GLuint *inputMapping;
const GLuint *outputMapping;
return t->temps[index];
case PROGRAM_OUTPUT:
- if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ)
- t->prevInstWrotePointSize = GL_TRUE;
-
if (t->procType == TGSI_PROCESSOR_VERTEX)
assert(index < VERT_RESULT_MAX);
else if (t->procType == TGSI_PROCESSOR_FRAGMENT)
t->inputMapping = inputMapping;
t->outputMapping = outputMapping;
t->ureg = ureg;
- t->pointSizeOutIndex = -1;
- t->prevInstWrotePointSize = GL_FALSE;
if (program->shader_program) {
for (i = 0; i < program->shader_program->NumUserUniformStorage; i++) {
outputSemanticName[i],
outputSemanticIndex[i]);
}
- if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
- /* Writing to the point size result register requires special
- * handling to implement clamping.
- */
- static const gl_state_index pointSizeClampState[STATE_LENGTH]
- = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
- /* XXX: note we are modifying the incoming shader here! Need to
- * do this before emitting the constant decls below, or this
- * will be missed.
- */
- unsigned pointSizeClampConst =
- _mesa_add_state_reference(proginfo->Parameters,
- pointSizeClampState);
- struct ureg_dst psizregtemp = ureg_DECL_temporary(ureg);
- t->pointSizeConst = ureg_DECL_constant(ureg, pointSizeClampConst);
- t->pointSizeResult = t->outputs[i];
- t->pointSizeOutIndex = i;
- t->outputs[i] = psizregtemp;
- }
}
if (passthrough_edgeflags)
emit_edgeflags(t);
set_insn_start(t, ureg_get_instruction_number(ureg));
compile_tgsi_instruction(t, (glsl_to_tgsi_instruction *)iter.get(),
clamp_color);
-
- if (t->prevInstWrotePointSize && proginfo->Id) {
- /* The previous instruction wrote to the (fake) vertex point size
- * result register. Now we need to clamp that value to the min/max
- * point size range, putting the result into the real point size
- * register.
- * Note that we can't do this easily at the end of program due to
- * possible early return.
- */
- set_insn_start(t, ureg_get_instruction_number(ureg));
- ureg_MAX(t->ureg,
- ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
- ureg_src(t->outputs[t->pointSizeOutIndex]),
- ureg_swizzle(t->pointSizeConst, 1,1,1,1));
- ureg_MIN(t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
- ureg_src(t->outputs[t->pointSizeOutIndex]),
- ureg_swizzle(t->pointSizeConst, 2,2,2,2));
- }
- t->prevInstWrotePointSize = GL_FALSE;
}
/* Fix up all emitted labels:
struct ureg_src samplers[PIPE_MAX_SAMPLERS];
struct ureg_src systemValues[SYSTEM_VALUE_MAX];
- /* Extra info for handling point size clamping in vertex shader */
- struct ureg_dst pointSizeResult; /**< Actual point size output register */
- struct ureg_src pointSizeConst; /**< Point size range constant register */
- GLint pointSizeOutIndex; /**< Temp point size output register */
- GLboolean prevInstWrotePointSize;
-
const GLuint *inputMapping;
const GLuint *outputMapping;
return t->temps[index];
case PROGRAM_OUTPUT:
- if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ)
- t->prevInstWrotePointSize = GL_TRUE;
-
if (t->procType == TGSI_PROCESSOR_VERTEX)
assert(index < VERT_RESULT_MAX);
else if (t->procType == TGSI_PROCESSOR_FRAGMENT)
t->inputMapping = inputMapping;
t->outputMapping = outputMapping;
t->ureg = ureg;
- t->pointSizeOutIndex = -1;
- t->prevInstWrotePointSize = GL_FALSE;
/*_mesa_print_program(program);*/
t->outputs[i] = ureg_DECL_output( ureg,
outputSemanticName[i],
outputSemanticIndex[i] );
- if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && program->Id) {
- /* Writing to the point size result register requires special
- * handling to implement clamping.
- */
- static const gl_state_index pointSizeClampState[STATE_LENGTH]
- = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, 0, 0, 0 };
- /* XXX: note we are modifying the incoming shader here! Need to
- * do this before emitting the constant decls below, or this
- * will be missed:
- */
- unsigned pointSizeClampConst =
- _mesa_add_state_reference(program->Parameters,
- pointSizeClampState);
- struct ureg_dst psizregtemp = ureg_DECL_temporary( ureg );
- t->pointSizeConst = ureg_DECL_constant( ureg, pointSizeClampConst );
- t->pointSizeResult = t->outputs[i];
- t->pointSizeOutIndex = i;
- t->outputs[i] = psizregtemp;
- }
}
if (passthrough_edgeflags)
emit_edgeflags( t, program );
for (i = 0; i < program->NumInstructions; i++) {
set_insn_start( t, ureg_get_instruction_number( ureg ));
compile_instruction( t, &program->Instructions[i], clamp_color );
-
- if (t->prevInstWrotePointSize && program->Id) {
- /* The previous instruction wrote to the (fake) vertex point size
- * result register. Now we need to clamp that value to the min/max
- * point size range, putting the result into the real point size
- * register.
- * Note that we can't do this easily at the end of program due to
- * possible early return.
- */
- set_insn_start( t, ureg_get_instruction_number( ureg ));
- ureg_MAX( t->ureg,
- ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
- ureg_src(t->outputs[t->pointSizeOutIndex]),
- ureg_swizzle(t->pointSizeConst, 1,1,1,1));
- ureg_MIN( t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
- ureg_src(t->outputs[t->pointSizeOutIndex]),
- ureg_swizzle(t->pointSizeConst, 2,2,2,2));
- }
- t->prevInstWrotePointSize = GL_FALSE;
}
/* Fix up all emitted labels: