extern const struct tnl_pipeline_stage *intel_pipeline[];
-GLboolean
+bool
i830CreateContext(const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
void *sharedContextPrivate)
struct intel_context *intel = &i830->intel;
struct gl_context *ctx = &intel->ctx;
if (!i830)
- return GL_FALSE;
+ return false;
i830InitVtbl(i830);
i830InitDriverFunctions(&functions);
if (!intelInitContext(intel, __DRI_API_OPENGL, mesaVis, driContextPriv,
sharedContextPrivate, &functions)) {
FREE(i830);
- return GL_FALSE;
+ return false;
}
_math_matrix_ctr(&intel->ViewportMatrix);
_tnl_allow_vertex_fog(ctx, 1);
_tnl_allow_pixel_fog(ctx, 0);
- return GL_TRUE;
+ return true;
}
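
A note on why this conversion is more than cosmetic: GLboolean is a typedef for unsigned char, so assigning a wide bitfield silently truncates to the low 8 bits, while C99 _Bool normalizes any nonzero value to true. A minimal standalone sketch (GLboolean_t stands in for the GL typedef):

#include <assert.h>
#include <stdbool.h>

typedef unsigned char GLboolean_t;   /* stand-in for GL's typedef */

int main(void)
{
   unsigned mask = 0x100;     /* a flag above the low byte */

   GLboolean_t g = mask;      /* truncates: g == 0 */
   bool b = mask;             /* normalizes: b == true */

   assert(g == 0 && b);
   return 0;
}
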
struct intel_region *depth_region);
/* i830_context.c
*/
-extern GLboolean
+extern bool
i830CreateContext(const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
void *sharedContextPrivate);
case GL_STENCIL_TEST:
{
- GLboolean hw_stencil = GL_FALSE;
+ bool hw_stencil = false;
if (ctx->DrawBuffer) {
struct intel_renderbuffer *irbStencil
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
* environments are treated identically.
*
* \todo
- * This function should return \c GLboolean. When \c GL_FALSE is returned,
+ * This function should return \c bool. When \c false is returned,
* it means that an environment is selected that the hardware cannot do. This
* is the way the Radeon and R200 drivers work.
*
GLuint args_A[3];
GLuint rgb_shift;
GLuint alpha_shift;
- GLboolean need_factor = 0;
+ bool need_factor = false;
int i;
unsigned used;
static const GLuint tex_blend_rgb[3] = {
static void
emit_texblend(struct i830_context *i830, GLuint unit, GLuint blendUnit,
- GLboolean last_stage)
+ bool last_stage)
{
struct gl_texture_unit *texUnit = &i830->intel.ctx.Texture.Unit[unit];
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
i830->state.TexBlendWordsUsed[blendUnit] = tmp_sz;
}
- I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), GL_TRUE);
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), true);
}
static void
i830->state.TexBlendWordsUsed[unit] = tmp_sz;
}
- I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), GL_TRUE);
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), true);
}
void
struct gl_context *ctx = &i830->intel.ctx;
GLuint unit, last_stage = 0, blendunit = 0;
- I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND_ALL, GL_FALSE);
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND_ALL, false);
if (ctx->Texture._EnabledUnits) {
for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++)
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
-static GLboolean
+static bool
i830_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
{
struct gl_context *ctx = &intel->ctx;
}
if (!intel_finalize_mipmap_tree(intel, unit))
- return GL_FALSE;
+ return false;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
mipFilt = MIPFILTER_LINEAR;
break;
default:
- return GL_FALSE;
+ return false;
}
if (sampler->MaxAnisotropy > 1.0) {
magFilt = FILTER_LINEAR;
break;
default:
- return GL_FALSE;
+ return false;
}
}
/* 3D textures not available on i830
*/
if (tObj->Target == GL_TEXTURE_3D)
- return GL_FALSE;
+ return false;
state[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(unit) |
border[1],
border[2]);
- I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), GL_TRUE);
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), true);
/* memcmp was already disabled, but definitely won't work as the
* region might now change and that wouldn't be detected:
*/
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
- return GL_TRUE;
+ return true;
}
i830UpdateTextureState(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
- GLboolean ok = GL_TRUE;
+ bool ok = true;
GLuint i;
for (i = 0; i < I830_TEX_UNITS && ok; i++) {
case 0:{
struct i830_context *i830 = i830_context(&intel->ctx);
if (i830->state.active & I830_UPLOAD_TEX(i))
- I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), GL_FALSE);
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), false);
if (i830->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i830->state.tex_buffer[i]);
}
case TEXTURE_3D_BIT:
default:
- ok = GL_FALSE;
+ ok = false;
break;
}
}
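
The `ok` loop converted above follows a validate-until-failure pattern: each texture unit is updated in turn, and the first failure stops the walk so the driver can raise a texture fallback. A minimal sketch (the unit count and the failing condition are made up):

#include <stdbool.h>

#define NUM_UNITS 4

static bool
update_unit(int i)
{
   return i != 2;   /* pretend unit 2 cannot be set up */
}

static bool
update_texture_state(void)
{
   bool ok = true;

   for (int i = 0; i < NUM_UNITS && ok; i++)
      ok = update_unit(i);

   return ok;   /* the driver then does FALLBACK(..., !ok) */
}
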
#define FILE_DEBUG_FLAG DEBUG_STATE
-static GLboolean i830_check_vertex_size(struct intel_context *intel,
- GLuint expected);
+static bool i830_check_vertex_size(struct intel_context *intel,
+ GLuint expected);
#define SZ_TO_HW(sz) ((sz-2)&0x3)
#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
-static GLboolean
+static bool
i830_check_vertex_size(struct intel_context *intel, GLuint expected)
{
struct i830_context *i830 = i830_context(&intel->ctx);
}
if (!colorRegions[0]) {
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
}
else {
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, false);
}
/* Check for depth fallback. */
if (irbDepth && irbDepth->region) {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = irbDepth->region;
} else if (irbDepth && !irbDepth->region) {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
depthRegion = NULL;
} else { /* !irbDepth */
/* No fallback is needed because there is no depth buffer. */
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = NULL;
}
/* Check for stencil fallback. */
if (irbStencil && irbStencil->region) {
assert(irbStencil->Base.Format == MESA_FORMAT_S8_Z24);
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
} else if (irbStencil && !irbStencil->region) {
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
} else { /* !irbStencil */
/* No fallback is needed because there is no stencil buffer. */
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
}
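
The FALLBACK() calls above each own one bit of a fallback mask; software rasterization is entered when the mask becomes nonzero and left when it drops back to zero. A self-contained sketch of that contract (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static unsigned fallback_mask;

static void
fallback(unsigned bit, bool mode)
{
   unsigned old = fallback_mask;

   if (mode) {
      fallback_mask |= bit;
      if (old == 0)
         printf("entering software fallback\n");
   } else {
      fallback_mask &= ~bit;
      if (old != 0 && fallback_mask == 0)
         printf("leaving software fallback\n");
   }
}

int main(void)
{
   fallback(1u << 0, true);    /* missing depth region: enter */
   fallback(1u << 1, true);    /* stencil problem too: stay */
   fallback(1u << 0, false);   /* depth fixed: still fallen back */
   fallback(1u << 1, false);   /* all clear: leave */
   return 0;
}
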
/* If we have a (packed) stencil buffer attached but no depth buffer,
extern const struct tnl_pipeline_stage *intel_pipeline[];
-GLboolean
+bool
i915CreateContext(int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
struct gl_context *ctx = &intel->ctx;
if (!i915)
- return GL_FALSE;
+ return false;
i915InitVtbl(i915);
if (!intelInitContext(intel, api, mesaVis, driContextPriv,
sharedContextPrivate, &functions)) {
FREE(i915);
- return GL_FALSE;
+ return false;
}
_math_matrix_ctr(&intel->ViewportMatrix);
ctx->Const.FragmentProgram.LowInt = ctx->Const.FragmentProgram.HighInt =
ctx->Const.FragmentProgram.MediumInt;
- ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
+ ctx->FragmentProgram._MaintainTexEnvProgram = true;
/* FINISHME: Are there other options that should be enabled for software
* FINISHME: vertex shaders?
*/
- ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitCondCodes = GL_TRUE;
+ ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitCondCodes = true;
struct gl_shader_compiler_options *const fs_options =
& ctx->ShaderCompilerOptions[MESA_SHADER_FRAGMENT];
fs_options->MaxIfDepth = 0;
- fs_options->EmitNoNoise = GL_TRUE;
- fs_options->EmitNoPow = GL_TRUE;
- fs_options->EmitNoMainReturn = GL_TRUE;
- fs_options->EmitNoIndirectInput = GL_TRUE;
- fs_options->EmitNoIndirectOutput = GL_TRUE;
- fs_options->EmitNoIndirectUniform = GL_TRUE;
- fs_options->EmitNoIndirectTemp = GL_TRUE;
+ fs_options->EmitNoNoise = true;
+ fs_options->EmitNoPow = true;
+ fs_options->EmitNoMainReturn = true;
+ fs_options->EmitNoIndirectInput = true;
+ fs_options->EmitNoIndirectOutput = true;
+ fs_options->EmitNoIndirectUniform = true;
+ fs_options->EmitNoIndirectTemp = true;
ctx->Const.MaxDrawBuffers = 1;
_tnl_allow_vertex_fog(ctx, 0);
_tnl_allow_pixel_fog(ctx, 1);
- return GL_TRUE;
+ return true;
}
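
Of the compiler options set above, EmitNoPow is the least self-explanatory: it requests that pow be lowered into operations the i915 can issue natively (assuming the standard exp2/log2 lowering). The identity involved:

#include <math.h>

/* pow(x, y) == exp2(y * log2(x)) for x > 0, mapping onto the
 * hardware's native EXP2/LOG2/MUL instructions. */
static float
lowered_pow(float x, float y)
{
   return exp2f(y * log2f(x));
}
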
{
struct gl_fragment_program FragProg;
- GLboolean translated;
- GLboolean params_uptodate;
- GLboolean on_hardware;
- GLboolean error; /* If program is malformed for any reason. */
+ bool translated;
+ bool params_uptodate;
+ bool on_hardware;
+ bool error; /* If program is malformed for any reason. */
/** Record of which phases R registers were last written in. */
GLuint register_phases[16];
/* Helpers for i915_fragprog.c:
*/
GLuint wpos_tex;
- GLboolean depth_written;
+ bool depth_written;
struct
{
/*======================================================================
* i915_context.c
*/
-extern GLboolean i915CreateContext(int api,
- const struct gl_config * mesaVis,
- __DRIcontext * driContextPriv,
- void *sharedContextPrivate);
+extern bool i915CreateContext(int api,
+ const struct gl_config * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate);
/*======================================================================
#include "i915_context.h"
#include "i915_debug.h"
-static GLboolean debug( struct debug_stream *stream, const char *name, GLuint len )
+static bool
+debug(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint i;
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
if (len == 0) {
printf("Error - zero length packet (0x%08x)\n", stream->ptr[0]);
assert(0);
- return GL_FALSE;
+ return false;
}
if (stream->print_addresses)
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
}
}
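
All of the debug_* handlers in this file share one contract: dump a packet, advance the stream offset by the packet length in dwords, and return true; false is reserved for malformed packets and for MI_BATCH_BUFFER_END, which terminates the walk. A hypothetical harness showing the shape:

#include <stdbool.h>
#include <stdio.h>

struct stream_sketch {
   const unsigned *dwords;
   unsigned offset;   /* in dwords here, for simplicity */
   unsigned count;
};

static bool
dump_packet(struct stream_sketch *s, const char *name, unsigned len)
{
   if (len == 0 || s->offset + len > s->count) {
      printf("malformed packet\n");
      return false;    /* stop the walk */
   }

   printf("%s (%u dwords)\n", name, len);
   s->offset += len;
   return true;        /* keep walking */
}
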
-static GLboolean debug_prim( struct debug_stream *stream, const char *name,
- GLboolean dump_floats,
- GLuint len )
+static bool
+debug_prim(struct debug_stream *stream, const char *name,
+ bool dump_floats, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
const char *prim = get_prim_name( ptr[0] );
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_program( struct debug_stream *stream, const char *name, GLuint len )
+static bool
+debug_program(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
if (len == 0) {
printf("Error - zero length packet (0x%08x)\n", stream->ptr[0]);
assert(0);
- return GL_FALSE;
+ return false;
}
if (stream->print_addresses)
i915_disassemble_program( ptr, len );
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_chain( struct debug_stream *stream, const char *name, GLuint len )
+static bool
+debug_chain(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
GLuint old_offset = stream->offset + len * sizeof(GLuint);
old_offset, stream->offset );
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_variable_length_prim( struct debug_stream *stream )
+static bool
+debug_variable_length_prim(struct debug_stream *stream)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
const char *prim = get_prim_name( ptr[0] );
printf("\n");
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
} \
} while (0)
-static GLboolean debug_load_immediate( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_load_immediate(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
GLuint bits = (ptr[0] >> 4) & 0xff;
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_load_indirect( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_load_indirect(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
GLuint bits = (ptr[0] >> 8) & 0x3f;
stream->offset += len * sizeof(GLuint);
- return GL_TRUE;
+ return true;
}
static void BR13( struct debug_stream *stream,
printf("\t0x%08x -- color\n", val);
}
-static GLboolean debug_copy_blit( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_copy_blit(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_color_blit( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_color_blit(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_modes4( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_modes4(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_map_state( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_map_state(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_sampler_state( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_sampler_state(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_dest_vars( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_dest_vars(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean debug_buf_info( struct debug_stream *stream,
- const char *name,
- GLuint len )
+static bool
+debug_buf_info(struct debug_stream *stream, const char *name, GLuint len)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
int j = 0;
stream->offset += len * sizeof(GLuint);
assert(j == len);
- return GL_TRUE;
+ return true;
}
-static GLboolean i915_debug_packet( struct debug_stream *stream )
+static bool
+i915_debug_packet(struct debug_stream *stream)
{
GLuint *ptr = (GLuint *)(stream->ptr + stream->offset);
GLuint cmd = *ptr;
return debug(stream, "MI_FLUSH", 1);
case 0xA:
debug(stream, "MI_BATCH_BUFFER_END", 1);
- return GL_FALSE;
+ return false;
case 0x22:
return debug(stream, "MI_LOAD_REGISTER_IMM", 3);
case 0x31:
{
struct debug_stream stream;
GLuint bytes = (end - start) * 4;
- GLboolean done = GL_FALSE;
+ bool done = false;
printf("\n\nBATCH: (%d)\n", bytes / 4);
return !p->error;
}
else
- return GL_TRUE;
+ return true;
}
static GLboolean
(void) _tnl_program_string(ctx, target, prog);
/* XXX check if program is legal, within limits */
- return GL_TRUE;
+ return true;
}
void
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
-static GLboolean
+static bool
i915_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
{
struct gl_context *ctx = &intel->ctx;
}
if (!intel_finalize_mipmap_tree(intel, unit))
- return GL_FALSE;
+ return false;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
mipFilt = MIPFILTER_LINEAR;
break;
default:
- return GL_FALSE;
+ return false;
}
if (sampler->MaxAnisotropy > 1.0) {
magFilt = FILTER_LINEAR;
break;
default:
- return GL_FALSE;
+ return false;
}
}
if (sampler->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB &&
tObj->Target != GL_TEXTURE_3D) {
if (tObj->Target == GL_TEXTURE_1D)
- return GL_FALSE;
+ return false;
state[I915_TEXREG_SS2] |=
(SS2_SHADOW_ENABLE |
wr == GL_CLAMP ||
ws == GL_CLAMP_TO_BORDER ||
wt == GL_CLAMP_TO_BORDER || wr == GL_CLAMP_TO_BORDER))
- return GL_FALSE;
+ return false;
/* Only support TEXCOORDMODE_CLAMP_EDGE and TEXCOORDMODE_CUBE (not
* used) when using cube map texture coordinates
if (tObj->Target == GL_TEXTURE_CUBE_MAP_ARB &&
(((ws != GL_CLAMP) && (ws != GL_CLAMP_TO_EDGE)) ||
((wt != GL_CLAMP) && (wt != GL_CLAMP_TO_EDGE))))
- return GL_FALSE;
+ return false;
state[I915_TEXREG_SS3] = ss3; /* SS3_NORMALIZED_COORDS */
}
- I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), GL_TRUE);
+ I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), true);
/* memcmp was already disabled, but definitely won't work as the
* region might now change and that wouldn't be detected:
*/
DBG(TEXTURE, "state[I915_TEXREG_MS4] = 0x%x\n", state[I915_TEXREG_MS4]);
#endif
- return GL_TRUE;
+ return true;
}
void
i915UpdateTextureState(struct intel_context *intel)
{
- GLboolean ok = GL_TRUE;
+ bool ok = true;
GLuint i;
for (i = 0; i < I915_TEX_UNITS && ok; i++) {
case 0:{
struct i915_context *i915 = i915_context(&intel->ctx);
if (i915->state.active & I915_UPLOAD_TEX(i))
- I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(i), GL_FALSE);
+ I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(i), false);
if (i915->state.tex_buffer[i] != NULL) {
drm_intel_bo_unreference(i915->state.tex_buffer[i]);
break;
}
default:
- ok = GL_FALSE;
+ ok = false;
break;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
-static GLboolean
+static bool
i915_check_vertex_size(struct intel_context *intel, GLuint expected)
{
struct i915_context *i915 = i915_context(&intel->ctx);
break;
default:
fprintf(stderr, "bad texcoord fmt %d\n", i);
- return GL_FALSE;
+ return false;
}
lis2 >>= S2_TEXCOORD_FMT1_SHIFT;
}
/* Check for depth fallback. */
if (irbDepth && irbDepth->region) {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = irbDepth->region;
} else if (irbDepth && !irbDepth->region) {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
depthRegion = NULL;
} else { /* !irbDepth */
/* No fallback is needed because there is no depth buffer. */
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
depthRegion = NULL;
}
/* Check for stencil fallback. */
if (irbStencil && irbStencil->region) {
assert(irbStencil->Base.Format == MESA_FORMAT_S8_Z24);
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
} else if (irbStencil && !irbStencil->region) {
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
} else { /* !irbStencil */
/* No fallback is needed because there is no stencil buffer. */
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
}
/* If we have a (packed) stencil buffer attached but no depth buffer,
/* Heuristic to choose between the two render paths:
*/
-static GLboolean
+static bool
choose_render(struct intel_context *intel, struct vertex_buffer *VB)
{
int vertsz = intel->vertex_size;
cost_render, cost_fallback);
if (cost_render > cost_fallback)
- return GL_FALSE;
+ return false;
- return GL_TRUE;
+ return true;
}
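
For clarity, the heuristic simply compares two rough cost estimates and prefers the hardware path on a tie. A sketch with placeholder weights (the driver derives its own from vertex size and primitive counts):

#include <stdbool.h>

/* true: use the hardware render path; false: fall back. */
static bool
choose_render_sketch(int vertex_size, int nr_verts, int nr_prims)
{
   int cost_render   = nr_verts * vertex_size;   /* vertex upload */
   int cost_fallback = nr_prims * 10;            /* per-prim overhead */

   return cost_render <= cost_fallback;
}
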
*/
if (intel->RenderIndex != 0 ||
!intel_validate_render(ctx, VB) || !choose_render(intel, VB)) {
- return GL_TRUE;
+ return true;
}
tnl->clipspace.new_inputs |= VERT_BIT_POS;
INTEL_FIREVERTICES(intel);
- return GL_FALSE; /* finished the pipe */
+ return false; /* finished the pipe */
}
static const struct tnl_pipeline_stage _intel_render_stage = {
intel->vtbl.emit_state(intel);
- intel->no_batch_wrap = GL_TRUE;
+ intel->no_batch_wrap = true;
/*printf("%s *", __progname);*/
OUT_BATCH(0);
ADVANCE_BATCH();
- intel->no_batch_wrap = GL_FALSE;
+ intel->no_batch_wrap = false;
/* printf(">"); */
}
* depends on the state just emitted. emit_state should be making sure we
* have the space for this.
*/
- intel->no_batch_wrap = GL_TRUE;
+ intel->no_batch_wrap = true;
#if 0
printf("emitting %d..%d=%d vertices size %d\n", offset,
ADVANCE_BATCH();
}
- intel->no_batch_wrap = GL_FALSE;
+ intel->no_batch_wrap = false;
drm_intel_bo_unreference(vb_bo);
}
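
The no_batch_wrap bracket around these emits encodes an invariant: the state packets and the draw commands that depend on them must land in the same batchbuffer, so wrapping (flushing) between them is forbidden. A sketch of what the batch code is assumed to enforce (names hypothetical):

#include <assert.h>
#include <stdbool.h>

struct batch_sketch {
   unsigned used, size;
   bool no_wrap;
};

static void
require_space(struct batch_sketch *b, unsigned n)
{
   if (b->used + n > b->size) {
      /* Flushing here would separate state from its draw, so it is
       * illegal inside a no-wrap bracket. */
      assert(!b->no_wrap);
      b->used = 0;   /* flush and start a fresh batch */
   }
   b->used += n;
}
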
struct intel_context *intel = intel_context(ctx); \
GLuint color[n] = { 0, }, spec[n] = { 0, }; \
GLuint coloroffset = intel->coloroffset; \
- GLboolean specoffset = intel->specoffset; \
+ bool specoffset = intel->specoffset; \
(void) color; (void) spec; (void) coloroffset; (void) specoffset;
struct intel_context *intel = intel_context(ctx);
GLuint flags = ctx->_TriangleCaps;
const struct gl_fragment_program *fprog = ctx->FragmentProgram._Current;
- GLboolean have_wpos = (fprog && (fprog->Base.InputsRead & FRAG_BIT_WPOS));
+ bool have_wpos = (fprog && (fprog->Base.InputsRead & FRAG_BIT_WPOS));
GLuint index = 0;
if (INTEL_DEBUG & DEBUG_STATE)
* \param bit one of INTEL_FALLBACK_x flags.
*/
void
-intelFallback(struct intel_context *intel, GLbitfield bit, GLboolean mode)
+intelFallback(struct intel_context *intel, GLbitfield bit, bool mode)
{
struct gl_context *ctx = &intel->ctx;
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint first_tmp;
GLuint last_tmp;
- GLboolean need_direction;
+ bool need_direction;
struct brw_vue_map vue_map;
};
struct brw_indirect v0_ptr, /* from */
struct brw_indirect v1_ptr, /* to */
struct brw_reg t0,
- GLboolean force_edgeflag );
+ bool force_edgeflag );
void brw_clip_init_planes( struct brw_clip_compile *c );
void brw_clip_emit_vue(struct brw_clip_compile *c,
struct brw_indirect vert,
- GLboolean allocate,
- GLboolean eot,
+ bool allocate,
+ bool eot,
GLuint header);
void brw_clip_kill_thread(struct brw_clip_compile *c);
brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.t, brw_imm_f(1.0));
brw_IF(p, BRW_EXECUTE_1);
{
- brw_clip_interp_vertex(c, newvtx0, vtx0, vtx1, c->reg.t0, GL_FALSE);
- brw_clip_interp_vertex(c, newvtx1, vtx1, vtx0, c->reg.t1, GL_FALSE);
+ brw_clip_interp_vertex(c, newvtx0, vtx0, vtx1, c->reg.t0, false);
+ brw_clip_interp_vertex(c, newvtx1, vtx1, vtx0, c->reg.t1, false);
brw_clip_emit_vue(c, newvtx0, 1, 0, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_START);
brw_clip_emit_vue(c, newvtx1, 0, 1, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_END);
brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtxPrev) );
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
- brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx, c->reg.t, GL_FALSE);
+ brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx, c->reg.t, false);
/* *outlist_ptr++ = vtxOut;
* nr_verts++;
brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtx) );
brw_set_predicate_control(p, BRW_PREDICATE_NONE);
- brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev, c->reg.t, GL_TRUE);
+ brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev, c->reg.t, true);
/* *outlist_ptr++ = vtxOut;
* nr_verts++;
* Output clipped polygon as an unfilled primitive:
*/
static void emit_lines(struct brw_clip_compile *c,
- GLboolean do_offset)
+ bool do_offset)
{
struct brw_compile *p = &c->func;
struct brw_instruction *loop;
static void emit_points(struct brw_clip_compile *c,
- GLboolean do_offset )
+ bool do_offset )
{
struct brw_compile *p = &c->func;
struct brw_instruction *loop;
static void emit_primitives( struct brw_clip_compile *c,
GLuint mode,
- GLboolean do_offset )
+ bool do_offset )
{
switch (mode) {
case CLIP_FILL:
struct brw_indirect v0_ptr, /* from */
struct brw_indirect v1_ptr, /* to */
struct brw_reg t0,
- GLboolean force_edgeflag)
+ bool force_edgeflag)
{
struct brw_compile *p = &c->func;
struct brw_reg tmp = get_tmp(c);
void brw_clip_emit_vue(struct brw_clip_compile *c,
struct brw_indirect vert,
- GLboolean allocate,
- GLboolean eot,
+ bool allocate,
+ bool eot,
GLuint header)
{
struct brw_compile *p = &c->func;
brw_init_queryobj_functions(functions);
}
-GLboolean brwCreateContext( int api,
- const struct gl_config *mesaVis,
- __DRIcontext *driContextPriv,
- void *sharedContextPrivate)
+bool
+brwCreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate)
{
struct dd_function_table functions;
struct brw_context *brw = rzalloc(NULL, struct brw_context);
if (!brw) {
printf("%s: failed to alloc context\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
brwInitVtbl( brw );
sharedContextPrivate, &functions )) {
printf("%s: failed to init intel context\n", __FUNCTION__);
FREE(brw);
- return GL_FALSE;
+ return false;
}
/* Initialize swrast, tnl driver tables: */
/* We want the GLSL compiler to emit code that uses condition codes */
for (i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
- ctx->ShaderCompilerOptions[i].EmitCondCodes = GL_TRUE;
- ctx->ShaderCompilerOptions[i].EmitNVTempInitialization = GL_TRUE;
- ctx->ShaderCompilerOptions[i].EmitNoNoise = GL_TRUE;
- ctx->ShaderCompilerOptions[i].EmitNoMainReturn = GL_TRUE;
- ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = GL_TRUE;
- ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = GL_TRUE;
+ ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
+ ctx->ShaderCompilerOptions[i].EmitNVTempInitialization = true;
+ ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
+ ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
+ ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
+ ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = true;
ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform =
(i == MESA_SHADER_FRAGMENT);
ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
(i == MESA_SHADER_FRAGMENT);
- ctx->ShaderCompilerOptions[i].LowerClipDistance = GL_TRUE;
+ ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
}
ctx->Const.VertexProgram.MaxNativeInstructions = (16 * 1024);
that affect provoking vertex decision. Always use last vertex
convention for quad primitive which works as expected for now. */
if (intel->gen >= 6)
- ctx->Const.QuadsFollowProvokingVertexConvention = GL_FALSE;
+ ctx->Const.QuadsFollowProvokingVertexConvention = false;
if (intel->is_g4x || intel->gen >= 5) {
brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
- brw->has_surface_tile_offset = GL_TRUE;
+ brw->has_surface_tile_offset = true;
if (intel->gen < 6)
- brw->has_compr4 = GL_TRUE;
- brw->has_aa_line_parameters = GL_TRUE;
- brw->has_pln = GL_TRUE;
+ brw->has_compr4 = true;
+ brw->has_aa_line_parameters = true;
+ brw->has_pln = true;
} else {
brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
brw->urb.size = 256;
brw->vs_max_threads = 16;
brw->wm_max_threads = 8 * 4;
- brw->has_negative_rhw_bug = GL_TRUE;
+ brw->has_negative_rhw_bug = true;
}
if (INTEL_DEBUG & DEBUG_SINGLE_THREAD) {
intel->batch.need_workaround_flush = true;
- ctx->VertexProgram._MaintainTnlProgram = GL_TRUE;
- ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
+ ctx->VertexProgram._MaintainTnlProgram = true;
+ ctx->FragmentProgram._MaintainTexEnvProgram = true;
brw_draw_init( brw );
ctx->Const.UniformBooleanTrue = 1;
}
- return GL_TRUE;
+ return true;
}
struct brw_vertex_program {
struct gl_vertex_program program;
GLuint id;
- GLboolean use_const_buffer;
+ bool use_const_buffer;
};
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
- GLboolean error;
+ bool error;
int dispatch_width;
uint32_t prog_offset_16;
struct intel_context intel; /**< base class, must be first field */
GLuint primitive; /**< Hardware primitive, such as _3DPRIM_TRILIST. */
- GLboolean emit_state_always;
- GLboolean has_surface_tile_offset;
- GLboolean has_compr4;
- GLboolean has_negative_rhw_bug;
- GLboolean has_aa_line_parameters;
- GLboolean has_pln;
- GLboolean new_vs_backend;
+ bool emit_state_always;
+ bool has_surface_tile_offset;
+ bool has_compr4;
+ bool has_negative_rhw_bug;
+ bool has_aa_line_parameters;
+ bool has_pln;
+ bool new_vs_backend;
struct {
struct brw_state_flags dirty;
GLuint csize; /* constant buffer size in urb registers */
GLuint sfsize; /* setup data size in urb registers */
- GLboolean constrained;
+ bool constrained;
GLuint max_vs_entries; /* Maximum number of VS entries */
GLuint max_gs_entries; /* Maximum number of GS entries */
struct {
struct brw_gs_prog_data *prog_data;
- GLboolean prog_active;
+ bool prog_active;
/** Offset in the program cache to the CLIP program pre-gen6 */
uint32_t prog_offset;
uint32_t state_offset;
struct brw_query_object *obj;
drm_intel_bo *bo;
int index;
- GLboolean active;
+ bool active;
} query;
/* Used to give every program string a unique id
*/
char *name;
int nsrc;
int ndst;
- GLboolean is_arith;
+ bool is_arith;
};
extern const struct brw_instruction_info brw_opcodes[128];
/*======================================================================
* brw_context.c
*/
-GLboolean brwCreateContext( int api,
- const struct gl_config *mesaVis,
- __DRIcontext *driContextPriv,
- void *sharedContextPrivate);
+bool brwCreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
/*======================================================================
* brw_queryobj.c
return brw->cache.bo->offset + prog_offset;
}
-GLboolean brw_do_cubemap_normalize(struct exec_list *instructions);
+bool brw_do_cubemap_normalize(struct exec_list *instructions);
#endif
extern "C" {
-GLboolean
+bool
brw_do_cubemap_normalize(exec_list *instructions)
{
brw_cubemap_normalize_visitor v;
/* May fail if out of video memory for texture or vbo upload, or on
* fallback conditions.
*/
-static GLboolean brw_try_draw_prims( struct gl_context *ctx,
+static bool brw_try_draw_prims( struct gl_context *ctx,
const struct gl_client_array *arrays[],
const struct _mesa_prim *prim,
GLuint nr_prims,
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
- GLboolean retval = GL_FALSE;
- GLboolean warn = GL_FALSE;
+ bool retval = false;
+ bool warn = false;
GLuint i;
if (ctx->NewState)
*/
if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
brw->state.validated_bo_count)) {
- static GLboolean warned;
+ static bool warned;
intel_batchbuffer_flush(intel);
/* Validate the state after we flushed the batch (which would have
if (!warned &&
dri_bufmgr_check_aperture_space(brw->state.validated_bos,
brw->state.validated_bo_count)) {
- warn = GL_TRUE;
- warned = GL_TRUE;
+ warn = true;
+ warned = true;
}
}
- intel->no_batch_wrap = GL_TRUE;
+ intel->no_batch_wrap = true;
brw_upload_state(brw);
}
else
brw_emit_prim(brw, &prim[i], brw->primitive);
- intel->no_batch_wrap = GL_FALSE;
+ intel->no_batch_wrap = false;
- retval = GL_TRUE;
+ retval = true;
}
if (intel->always_flush_batch)
GLuint min_index,
GLuint max_index )
{
- GLboolean retval;
+ bool retval;
if (!_mesa_check_conditional_render(ctx))
return;
* Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
*/
static GLuint get_surface_type( GLenum type, GLuint size,
- GLenum format, GLboolean normalized )
+ GLenum format, bool normalized )
{
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
printf("type %s size %d normalized %d\n",
* isn't an issue at this point.
*/
if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
- intel->Fallback = GL_TRUE; /* boolean, not bitfield */
+ intel->Fallback = true; /* boolean, not bitfield */
return;
}
/* Position array not properly enabled:
*/
if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
- intel->Fallback = GL_TRUE; /* boolean, not bitfield */
+ intel->Fallback = true; /* boolean, not bitfield */
return;
}
struct brw_instruction *current;
GLuint flag_value;
- GLboolean single_program_flow;
+ bool single_program_flow;
bool compressed;
struct brw_context *brw;
}
/** Do two brw_regs refer to the same register? */
-static INLINE GLboolean
+static INLINE bool
brw_same_reg(struct brw_reg r1, struct brw_reg r2)
{
return r1.file == r2.file && r1.nr == r2.nr;
GLuint msg_control,
GLuint msg_type,
GLuint msg_length,
- GLboolean header_present,
+ bool header_present,
GLuint pixel_scoreboard_clear,
GLuint response_length,
GLuint end_of_thread,
struct brw_reg dest,
GLuint msg_reg_nr,
struct brw_reg src0,
- GLboolean allocate,
- GLboolean used,
+ bool allocate,
+ bool used,
GLuint msg_length,
GLuint response_length,
- GLboolean eot,
- GLboolean writes_complete,
+ bool eot,
+ bool writes_complete,
GLuint offset,
GLuint swizzle);
struct brw_reg dest,
GLuint msg_reg_nr,
struct brw_reg src0,
- GLboolean allocate,
+ bool allocate,
GLuint response_length,
- GLboolean eot);
+ bool eot);
void brw_fb_WRITE(struct brw_compile *p,
int dispatch_width,
GLuint binding_table_index,
GLuint msg_length,
GLuint response_length,
- GLboolean eot,
- GLboolean header_present);
+ bool eot,
+ bool header_present);
void brw_SAMPLE(struct brw_compile *p,
struct brw_reg dest,
GLuint msg_type,
GLuint response_length,
GLuint msg_length,
- GLboolean eot,
+ bool eot,
GLuint header_present,
GLuint simd_mode);
struct brw_instruction *insn,
GLuint function,
GLuint integer_type,
- GLboolean low_precision,
- GLboolean saturate,
+ bool low_precision,
+ bool saturate,
GLuint dataType )
{
struct brw_context *brw = p->brw;
static void brw_set_ff_sync_message(struct brw_compile *p,
struct brw_instruction *insn,
- GLboolean allocate,
+ bool allocate,
GLuint response_length,
- GLboolean end_of_thread)
+ bool end_of_thread)
{
struct brw_context *brw = p->brw;
struct intel_context *intel = &brw->intel;
static void brw_set_urb_message( struct brw_compile *p,
struct brw_instruction *insn,
- GLboolean allocate,
- GLboolean used,
+ bool allocate,
+ bool used,
GLuint msg_length,
GLuint response_length,
- GLboolean end_of_thread,
- GLboolean complete,
+ bool end_of_thread,
+ bool complete,
GLuint offset,
GLuint swizzle_control )
{
GLuint msg_control,
GLuint msg_type,
GLuint msg_length,
- GLboolean header_present,
+ bool header_present,
GLuint pixel_scoreboard_clear,
GLuint response_length,
GLuint end_of_thread,
GLuint msg_type,
GLuint response_length,
GLuint msg_length,
- GLboolean eot,
+ bool eot,
GLuint header_present,
GLuint simd_mode)
{
msg_control,
msg_type,
mlen,
- GL_TRUE, /* header_present */
+ true, /* header_present */
0, /* pixel scoreboard */
send_commit_msg, /* response_length */
0, /* eot */
GLuint binding_table_index,
GLuint msg_length,
GLuint response_length,
- GLboolean eot,
- GLboolean header_present)
+ bool eot,
+ bool header_present)
{
struct intel_context *intel = &p->brw->intel;
struct brw_instruction *insn;
GLuint msg_type,
GLuint response_length,
GLuint msg_length,
- GLboolean eot,
+ bool eot,
GLuint header_present,
GLuint simd_mode)
{
struct intel_context *intel = &p->brw->intel;
- GLboolean need_stall = 0;
+ bool need_stall = false;
if (writemask == 0) {
/*printf("%s: zero writemask??\n", __FUNCTION__); */
/* printf("need stall %x %x\n", newmask , writemask); */
}
else {
- GLboolean dispatch_16 = GL_FALSE;
+ bool dispatch_16 = false;
struct brw_reg m1 = brw_message_reg(msg_reg_nr);
guess_execution_size(p, p->current, dest);
if (p->current->header.execution_size == BRW_EXECUTE_16)
- dispatch_16 = GL_TRUE;
+ dispatch_16 = true;
newmask = ~newmask & WRITEMASK_XYZW;
struct brw_reg dest,
GLuint msg_reg_nr,
struct brw_reg src0,
- GLboolean allocate,
- GLboolean used,
+ bool allocate,
+ bool used,
GLuint msg_length,
GLuint response_length,
- GLboolean eot,
- GLboolean writes_complete,
+ bool eot,
+ bool writes_complete,
GLuint offset,
GLuint swizzle)
{
struct brw_reg dest,
GLuint msg_reg_nr,
struct brw_reg src0,
- GLboolean allocate,
+ bool allocate,
GLuint response_length,
- GLboolean eot)
+ bool eot)
{
struct intel_context *intel = &p->brw->intel;
struct brw_instruction *insn;
#define FILE_DEBUG_FLAG DEBUG_FALLBACKS
-static GLboolean do_check_fallback(struct brw_context *brw)
+static bool do_check_fallback(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
GLuint i;
if (brw->intel.no_rast) {
DBG("FALLBACK: rasterization disabled\n");
- return GL_TRUE;
+ return true;
}
/* _NEW_RENDERMODE
*/
if (ctx->RenderMode != GL_RENDER) {
DBG("FALLBACK: render mode\n");
- return GL_TRUE;
+ return true;
}
/* _NEW_TEXTURE:
struct gl_texture_image *texImage = tex_obj->Image[0][tex_obj->BaseLevel];
if (texImage->Border) {
DBG("FALLBACK: texture border\n");
- return GL_TRUE;
+ return true;
}
}
}
- return GL_FALSE;
+ return false;
}
static void check_fallback(struct brw_context *brw)
* field is treated as a boolean, not a bitmask. It's only set in a
* couple of places.
*/
-void intelFallback( struct intel_context *intel, GLuint bit, GLboolean mode )
+void intelFallback( struct intel_context *intel, GLuint bit, bool mode )
{
}
fs_visitor v(c, prog, shader);
if (!v.run()) {
- prog->LinkStatus = GL_FALSE;
+ prog->LinkStatus = false;
ralloc_strcat(&prog->InfoLog, v.fail_msg);
return false;
int force_sechalf_stack;
};
-GLboolean brw_do_channel_expressions(struct exec_list *instructions);
-GLboolean brw_do_vector_splitting(struct exec_list *instructions);
+bool brw_do_channel_expressions(struct exec_list *instructions);
+bool brw_do_vector_splitting(struct exec_list *instructions);
bool brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog);
return false;
}
-GLboolean
+bool
brw_do_channel_expressions(exec_list *instructions)
{
ir_channel_expressions_visitor v;
void
fs_visitor::generate_fb_write(fs_inst *inst)
{
- GLboolean eot = inst->eot;
+ bool eot = inst->eot;
struct brw_reg implied_header;
/* Header is 2 regs, g0 and g1 are the contents. g0 will be implied
fs_visitor::emit_fb_writes()
{
this->current_annotation = "FB write header";
- GLboolean header_present = GL_TRUE;
+ bool header_present = true;
int base_mrf = 2;
int nr = base_mrf;
int reg_width = c->dispatch_width / 8;
/* Provide consistent primitive order with brw_set_prim's
* optimization of single quads to trifans.
*/
- key->pv_first = GL_TRUE;
+ key->pv_first = true;
}
/* _NEW_TRANSFORM */
static void brw_gs_emit_vue(struct brw_gs_compile *c,
struct brw_reg vert,
- GLboolean last,
+ bool last,
GLuint header)
{
struct brw_compile *p = &c->func;
struct intel_context *intel = &c->func.brw->intel;
- GLboolean allocate = !last;
+ bool allocate = !last;
struct brw_reg temp;
if (intel->gen < 6)
};
static INLINE
-GLboolean brw_is_arithmetic_inst(const struct brw_instruction *inst)
+bool brw_is_arithmetic_inst(const struct brw_instruction *inst)
{
return brw_opcodes[inst->header.opcode].is_arith;
}
[BRW_REGISTER_TYPE_F] = 4
};
-static INLINE GLboolean
+static INLINE bool
brw_is_grf_written(const struct brw_instruction *inst,
int reg_index, int size,
int gen)
{
if (brw_opcodes[inst->header.opcode].ndst == 0)
- return GL_FALSE;
+ return false;
if (inst->bits1.da1.dest_address_mode != BRW_ADDRESS_DIRECT)
if (inst->bits1.ia1.dest_reg_file == BRW_GENERAL_REGISTER_FILE)
- return GL_TRUE;
+ return true;
if (inst->bits1.da1.dest_reg_file != BRW_GENERAL_REGISTER_FILE)
- return GL_FALSE;
+ return false;
const int reg_start = reg_index * REG_SIZE;
const int reg_end = reg_start + size;
return left < right;
}
-static GLboolean
+static bool
brw_is_mrf_written_alu(const struct brw_instruction *inst,
int reg_index, int size)
{
if (brw_opcodes[inst->header.opcode].ndst == 0)
- return GL_FALSE;
+ return false;
if (inst->bits1.da1.dest_reg_file != BRW_MESSAGE_REGISTER_FILE)
- return GL_FALSE;
+ return false;
if (inst->bits1.da1.dest_address_mode != BRW_ADDRESS_DIRECT)
- return GL_TRUE;
+ return true;
const int reg_start = reg_index * REG_SIZE;
const int reg_end = reg_start + size;
* consider that we are writing the register.
*/
if (is_compr4 && inst->header.execution_size != BRW_EXECUTE_16)
- return GL_TRUE;
+ return true;
/* Here we write mrf_{i} and mrf_{i+4}. So we read two times 8 elements */
if (is_compr4) {
const int right1 = MIN2(write_end1, reg_end);
if (left0 < right0 || left1 < right1)
- return GL_TRUE;
+ return true;
}
else {
int length;
const int right = MIN2(write_end, reg_end);
if (left < right)
- return GL_TRUE;
+ return true;
}
- return GL_FALSE;
+ return false;
}
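
Every read/write test in this pass bottoms out in the same half-open interval check visible above: byte ranges [a0, a1) and [b0, b1) intersect iff max(a0, b0) < min(a1, b1). Extracted as a sketch:

#include <stdbool.h>

static bool
ranges_overlap(int a_start, int a_end, int b_start, int b_end)
{
   const int left  = a_start > b_start ? a_start : b_start;
   const int right = a_end < b_end ? a_end : b_end;

   return left < right;   /* nonempty intersection */
}
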
/* SEND may perform an implicit mov to a mrf register */
-static GLboolean brw_is_mrf_written_send(const struct brw_instruction *inst,
- int reg_index, int size)
+static bool
+brw_is_mrf_written_send(const struct brw_instruction *inst,
+ int reg_index, int size)
{
const int reg_start = reg_index * REG_SIZE;
if (inst->header.opcode != BRW_OPCODE_SEND ||
inst->bits1.da1.src0_reg_file == 0)
- return GL_FALSE;
+ return false;
return left < right;
}
/* Specific path for message register since we need to handle the compr4 case */
-static INLINE GLboolean
+static INLINE bool
brw_is_mrf_written(const struct brw_instruction *inst, int reg_index, int size)
{
return (brw_is_mrf_written_alu(inst, reg_index, size) ||
brw_is_mrf_written_send(inst, reg_index, size));
}
-static INLINE GLboolean
+static INLINE bool
brw_is_mrf_read(const struct brw_instruction *inst,
int reg_index, int size, int gen)
{
if (inst->header.opcode != BRW_OPCODE_SEND)
- return GL_FALSE;
+ return false;
if (inst->bits2.da1.src0_address_mode != BRW_ADDRESS_DIRECT)
- return GL_TRUE;
+ return true;
const int reg_start = reg_index*REG_SIZE;
const int reg_end = reg_start + size;
return left < right;
}
-static INLINE GLboolean
+static INLINE bool
brw_is_grf_read(const struct brw_instruction *inst, int reg_index, int size)
{
int i, j;
if (brw_opcodes[inst->header.opcode].nsrc == 0)
- return GL_FALSE;
+ return false;
/* Look at first source. We must take into account register regions to
* monitor carefully the read. Note that we are a bit too conservative here
if (inst->bits2.da1.src0_address_mode != BRW_ADDRESS_DIRECT)
if (inst->bits1.ia1.src0_reg_file == BRW_GENERAL_REGISTER_FILE)
- return GL_TRUE;
+ return true;
if (inst->bits1.da1.src0_reg_file != BRW_GENERAL_REGISTER_FILE)
- return GL_FALSE;
+ return false;
const int reg_start = reg_index*REG_SIZE;
const int reg_end = reg_start + size;
const int left = write_start > reg_start ? write_start : reg_start;
const int right = write_end < reg_end ? write_end : reg_end;
if (left < right)
- return GL_TRUE;
+ return true;
write_start += hs;
}
row_start += vs;
if (inst->bits3.da1.src1_address_mode != BRW_ADDRESS_DIRECT)
if (inst->bits1.ia1.src1_reg_file == BRW_GENERAL_REGISTER_FILE)
- return GL_TRUE;
+ return true;
if (inst->bits1.da1.src1_reg_file != BRW_GENERAL_REGISTER_FILE)
- return GL_FALSE;
+ return false;
const int reg_start = reg_index*REG_SIZE;
const int reg_end = reg_start + size;
const int left = write_start > reg_start ? write_start : reg_start;
const int right = write_end < reg_end ? write_end : reg_end;
if (left < right)
- return GL_TRUE;
+ return true;
write_start += hs;
}
row_start += vs;
}
}
- return GL_FALSE;
+ return false;
}
-static INLINE GLboolean
+static INLINE bool
brw_is_control_done(const struct brw_instruction *mov) {
return
mov->header.dependency_control != 0 ||
mov->header.debug_control != 0;
}
-static INLINE GLboolean
+static INLINE bool
brw_is_predicated(const struct brw_instruction *mov) {
return mov->header.predicate_control != 0;
}
-static INLINE GLboolean
+static INLINE bool
brw_is_grf_to_mrf_mov(const struct brw_instruction *mov,
int *mrf_index,
int *grf_index,
- GLboolean *is_compr4)
+ bool *is_compr4)
{
if (brw_is_predicated(mov) ||
brw_is_control_done(mov) ||
mov->header.debug_control != 0)
- return GL_FALSE;
+ return false;
if (mov->bits1.da1.dest_address_mode != BRW_ADDRESS_DIRECT ||
mov->bits1.da1.dest_reg_file != BRW_MESSAGE_REGISTER_FILE ||
mov->bits1.da1.dest_reg_type != BRW_REGISTER_TYPE_F ||
mov->bits1.da1.dest_horiz_stride != BRW_HORIZONTAL_STRIDE_1 ||
mov->bits1.da1.dest_subreg_nr != 0)
- return GL_FALSE;
+ return false;
if (mov->bits2.da1.src0_address_mode != BRW_ADDRESS_DIRECT ||
mov->bits1.da1.src0_reg_file != BRW_GENERAL_REGISTER_FILE ||
mov->bits2.da1.src0_subreg_nr != 0 ||
mov->bits2.da1.src0_abs != 0 ||
mov->bits2.da1.src0_negate != 0)
- return GL_FALSE;
+ return false;
*grf_index = mov->bits2.da1.src0_reg_nr;
*mrf_index = mov->bits1.da1.dest_reg_nr & 0x0f;
*is_compr4 = (mov->bits1.da1.dest_reg_nr & BRW_MRF_COMPR4) != 0;
- return GL_TRUE;
+ return true;
}
-static INLINE GLboolean
+static INLINE bool
brw_is_grf_straight_write(const struct brw_instruction *inst, int grf_index)
{
/* remark: no problem to predicate a SEL instruction */
if ((!brw_is_predicated(inst) || inst->header.opcode == BRW_OPCODE_SEL) &&
- brw_is_control_done(inst) == GL_FALSE &&
+ !brw_is_control_done(inst) &&
inst->header.execution_size == 4 &&
inst->header.access_mode == BRW_ALIGN_1 &&
inst->bits1.da1.dest_address_mode == BRW_ADDRESS_DIRECT &&
inst->bits1.da1.dest_reg_nr == grf_index &&
inst->bits1.da1.dest_subreg_nr == 0 &&
brw_is_arithmetic_inst(inst))
- return GL_TRUE;
+ return true;
- return GL_FALSE;
+ return false;
}
-static INLINE GLboolean
+static INLINE bool
brw_inst_are_equal(const struct brw_instruction *src0,
const struct brw_instruction *src1)
{
field_dst[3] = field_src[3];
}
-static void brw_remove_inst(struct brw_compile *p, const GLboolean *removeInst)
+static void brw_remove_inst(struct brw_compile *p, const bool *removeInst)
{
int i, nr_insn = 0, to = 0, from = 0;
}
for (i = 0; i < p->nr_insn; ++i)
- if (removeInst[i] == GL_FALSE)
+ if (!removeInst[i])
nr_insn++;
p->nr_insn = nr_insn;
}
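
brw_remove_inst is a stable in-place compaction over the instruction store: survivors are copied down over the holes left by flagged instructions. The same idea with plain ints standing in for struct brw_instruction:

#include <stdbool.h>

static int
compact(int *store, int n, const bool *remove)
{
   int to = 0;

   for (int from = 0; from < n; from++)
      if (!remove[from])
         store[to++] = store[from];

   return to;   /* new element count */
}
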
const int gen = p->brw->intel.gen;
int i, j;
- GLboolean *removeInst = calloc(sizeof(GLboolean), p->nr_insn);
+ bool *removeInst = calloc(p->nr_insn, sizeof(bool));
for (i = 0; i < p->nr_insn; i++) {
if (removeInst[i])
continue;
const struct brw_instruction *mov = p->store + i;
int mrf_index, grf_index;
- GLboolean is_compr4;
+ bool is_compr4;
/* Only consider _straight_ grf-to-mrf moves */
if (!brw_is_grf_to_mrf_mov(mov, &mrf_index, &grf_index, &is_compr4))
const struct brw_instruction *inst = p->store + j;
if (brw_inst_are_equal(mov, inst)) {
- removeInst[j] = GL_TRUE;
+ removeInst[j] = true;
continue;
}
const int gen = brw->intel.gen;
const int simd16_size = 2*REG_SIZE;
- GLboolean *removeInst = calloc(sizeof(GLboolean), p->nr_insn);
+ bool *removeInst = calloc(p->nr_insn, sizeof(bool));
assert(removeInst);
for (i = 0; i < p->nr_insn; i++) {
struct brw_instruction *grf_inst = NULL;
const struct brw_instruction *mov = p->store + i;
int mrf_index, grf_index;
- GLboolean is_compr4;
+ bool is_compr4;
/* Only consider _straight_ grf-to-mrf moves */
if (!brw_is_grf_to_mrf_mov(mov, &mrf_index, &grf_index, &is_compr4))
/* Look where the register has been set */
prev = i;
- GLboolean potential_remove = GL_FALSE;
+ bool potential_remove = false;
while (prev--) {
/* If _one_ instruction writes the grf, we try to remove the mov */
struct brw_instruction *inst = p->store + prev;
if (brw_is_grf_straight_write(inst, grf_index)) {
- potential_remove = GL_TRUE;
+ potential_remove = true;
grf_inst = inst;
break;
}
}
- if (potential_remove == GL_FALSE)
+ if (!potential_remove)
continue;
- removeInst[i] = GL_TRUE;
+ removeInst[i] = true;
/* Monitor first the section of code between the grf computation and the
* mov. Here we cannot read or write both mrf and grf register
brw_is_mrf_written(inst, mrf_index1, REG_SIZE) ||
brw_is_mrf_read(inst, mrf_index0, REG_SIZE, gen) ||
brw_is_mrf_read(inst, mrf_index1, REG_SIZE, gen)) {
- removeInst[i] = GL_FALSE;
+ removeInst[i] = false;
break;
}
}
continue;
if (brw_is_grf_read(inst, grf_index, simd16_size)) {
- removeInst[i] = GL_FALSE;
+ removeInst[i] = false;
break;
}
free(removeInst);
}
-static GLboolean
+static bool
is_single_channel_dp4(struct brw_instruction *insn)
{
if (insn->header.opcode != BRW_OPCODE_DP4 ||
insn->header.execution_size != BRW_EXECUTE_8 ||
insn->header.access_mode != BRW_ALIGN_16 ||
insn->bits1.da1.dest_reg_file != BRW_GENERAL_REGISTER_FILE)
- return GL_FALSE;
+ return false;
if (!is_power_of_two(insn->bits1.da16.dest_writemask))
- return GL_FALSE;
+ return false;
- return GL_TRUE;
+ return true;
}
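
The writemask test above relies on a single-channel mask being a power of two (WRITEMASK_X/Y/Z/W are 1, 2, 4, 8). The usual bit trick, assuming the helper is meant to reject zero:

#include <stdbool.h>

static bool
is_power_of_two_sketch(unsigned v)
{
   return v != 0 && (v & (v - 1)) == 0;
}
/* 1, 2, 4, 8 pass (one channel); 0x3 (XY) and 0 fail. */
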
/**
}
-static GLboolean brwIsProgramNative( struct gl_context *ctx,
- GLenum target,
- struct gl_program *prog )
+static GLboolean
+brwIsProgramNative(struct gl_context *ctx,
+ GLenum target,
+ struct gl_program *prog)
{
- return GL_TRUE;
+ return true;
}
static void
if (shader) {
ralloc_strcat(&shader->InfoLog, msg);
- shader->LinkStatus = GL_FALSE;
+ shader->LinkStatus = false;
}
}
-static GLboolean brwProgramStringNotify( struct gl_context *ctx,
- GLenum target,
- struct gl_program *prog )
+static GLboolean
+brwProgramStringNotify(struct gl_context *ctx,
+ GLenum target,
+ struct gl_program *prog)
{
struct brw_context *brw = brw_context(ctx);
int i;
shader_program = _mesa_lookup_shader_program(ctx, prog->Id);
if (shader_program
&& shader_program->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
- return GL_TRUE;
+ return true;
}
}
else if (target == GL_VERTEX_PROGRAM_ARB) {
"i965 driver doesn't yet support uninlined function "
"calls. Move to using a single return statement at "
"the end of the function to work around it.\n");
- return GL_FALSE;
+ return false;
}
if (prog->Instructions[i].Opcode == OPCODE_RET) {
shader_error(ctx, prog,
"i965 driver doesn't yet support \"return\" "
"from main().\n");
- return GL_FALSE;
+ return false;
}
for (r = 0; r < _mesa_num_inst_src_regs(inst->Opcode); r++) {
prog->Instructions[i].SrcReg[r].File == PROGRAM_INPUT) {
shader_error(ctx, prog,
"Variable indexing of shader inputs unsupported\n");
- return GL_FALSE;
+ return false;
}
}
prog->Instructions[i].DstReg.File == PROGRAM_OUTPUT) {
shader_error(ctx, prog,
"Variable indexing of FS outputs unsupported\n");
- return GL_FALSE;
+ return false;
}
if (target == GL_FRAGMENT_PROGRAM_ARB) {
if ((prog->Instructions[i].DstReg.RelAddr &&
shader_error(ctx, prog,
"Variable indexing of variable arrays in the FS "
"unsupported\n");
- return GL_FALSE;
+ return false;
}
}
}
- return GL_TRUE;
+ return true;
}
/* Per-thread scratch space is a power-of-two multiple of 1KB. */
if (query->bo == NULL)
return;
- drm_intel_bo_map(query->bo, GL_FALSE);
+ drm_intel_bo_map(query->bo, false);
results = query->bo->virtual;
if (query->Base.Target == GL_TIME_ELAPSED_EXT) {
if (intel->gen >= 6)
query->Base.Id = id;
query->Base.Result = 0;
- query->Base.Active = GL_FALSE;
- query->Base.Ready = GL_TRUE;
+ query->Base.Active = false;
+ query->Base.Ready = true;
return &query->Base;
}
struct brw_query_object *query = (struct brw_query_object *)q;
brw_queryobj_get_results(ctx, query);
- query->Base.Ready = GL_TRUE;
+ query->Base.Ready = true;
}
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
brw_queryobj_get_results(ctx, query);
- query->Base.Ready = GL_TRUE;
+ query->Base.Ready = true;
}
}
brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
/* clear target buffer */
- drm_intel_bo_map(brw->query.bo, GL_TRUE);
+ drm_intel_bo_map(brw->query.bo, true);
memset((char *)brw->query.bo->virtual, 0, 4096);
drm_intel_bo_unmap(brw->query.bo);
query->first_index = brw->query.index;
}
query->last_index = brw->query.index;
- brw->query.active = GL_TRUE;
+ brw->query.active = true;
}
/** Called at batchbuffer flush to get an ending PS_DEPTH_COUNT */
ADVANCE_BATCH();
}
- brw->query.active = GL_FALSE;
+ brw->query.active = false;
brw->query.index++;
}
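
The first_index/last_index bookkeeping above feeds result accumulation: assuming PS_DEPTH_COUNT snapshots are written to the query bo in begin/end pairs, the query result is the sum of the pairwise deltas. A sketch under that layout:

static unsigned long long
accumulate_depth_counts(const unsigned long long *snapshots,
                        int first, int last)
{
   unsigned long long result = 0;

   for (int i = first; i <= last; i++)
      result += snapshots[i * 2 + 1] - snapshots[i * 2];

   return result;
}
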
switch (key->primitive) {
case SF_TRIANGLES:
c.nr_verts = 3;
- brw_emit_tri_setup( &c, GL_TRUE );
+ brw_emit_tri_setup( &c, true );
break;
case SF_LINES:
c.nr_verts = 2;
- brw_emit_line_setup( &c, GL_TRUE );
+ brw_emit_line_setup( &c, true );
break;
case SF_POINTS:
c.nr_verts = 1;
if (key->do_point_sprite)
- brw_emit_point_sprite_setup( &c, GL_TRUE );
+ brw_emit_point_sprite_setup( &c, true );
else
- brw_emit_point_setup( &c, GL_TRUE );
+ brw_emit_point_setup( &c, true );
break;
case SF_UNFILLED_TRIS:
c.nr_verts = 3;
};
-void brw_emit_tri_setup( struct brw_sf_compile *c, GLboolean allocate );
-void brw_emit_line_setup( struct brw_sf_compile *c, GLboolean allocate );
-void brw_emit_point_setup( struct brw_sf_compile *c, GLboolean allocate );
-void brw_emit_point_sprite_setup( struct brw_sf_compile *c, GLboolean allocate );
+void brw_emit_tri_setup( struct brw_sf_compile *c, bool allocate );
+void brw_emit_line_setup( struct brw_sf_compile *c, bool allocate );
+void brw_emit_point_setup( struct brw_sf_compile *c, bool allocate );
+void brw_emit_point_sprite_setup( struct brw_sf_compile *c, bool allocate );
void brw_emit_anyprim_setup( struct brw_sf_compile *c );
int brw_sf_compute_urb_entry_read_offset(struct intel_context *intel);
return brw_vec4_grf(vert.nr + off, sub * 4);
}
-static GLboolean have_attr(struct brw_sf_compile *c,
- GLuint attr)
+static bool
+have_attr(struct brw_sf_compile *c, GLuint attr)
{
return (c->key.attrs & BITFIELD64_BIT(attr)) ? 1 : 0;
}
}
-static GLboolean calculate_masks( struct brw_sf_compile *c,
- GLuint reg,
- GLushort *pc,
- GLushort *pc_persp,
- GLushort *pc_linear)
+static bool
+calculate_masks(struct brw_sf_compile *c,
+ GLuint reg,
+ GLushort *pc,
+ GLushort *pc_persp,
+ GLushort *pc_linear)
{
- GLboolean is_last_attr = (reg == c->nr_setup_regs - 1);
+ bool is_last_attr = (reg == c->nr_setup_regs - 1);
GLbitfield64 persp_mask;
GLbitfield64 linear_mask;
-void brw_emit_tri_setup( struct brw_sf_compile *c, GLboolean allocate)
+void brw_emit_tri_setup(struct brw_sf_compile *c, bool allocate)
{
struct brw_compile *p = &c->func;
GLuint i;
struct brw_reg a1 = offset(c->vert[1], i);
struct brw_reg a2 = offset(c->vert[2], i);
GLushort pc, pc_persp, pc_linear;
- GLboolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+ bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
if (pc_persp)
{
-void brw_emit_line_setup( struct brw_sf_compile *c, GLboolean allocate)
+void brw_emit_line_setup(struct brw_sf_compile *c, bool allocate)
{
struct brw_compile *p = &c->func;
GLuint i;
struct brw_reg a0 = offset(c->vert[0], i);
struct brw_reg a1 = offset(c->vert[1], i);
GLushort pc, pc_persp, pc_linear;
- GLboolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+ bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
if (pc_persp)
{
}
}
-void brw_emit_point_sprite_setup( struct brw_sf_compile *c, GLboolean allocate)
+void brw_emit_point_sprite_setup(struct brw_sf_compile *c, bool allocate)
{
struct brw_compile *p = &c->func;
GLuint i;
{
struct brw_reg a0 = offset(c->vert[0], i);
GLushort pc, pc_persp, pc_linear, pc_coord_replace;
- GLboolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+ bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
pc_coord_replace = calculate_point_sprite_mask(c, i);
pc_persp &= ~pc_coord_replace;
/* Points setup - several simplifications as all attributes are
* constant across the face of the point (point sprites excluded!)
*/
-void brw_emit_point_setup( struct brw_sf_compile *c, GLboolean allocate)
+void brw_emit_point_setup(struct brw_sf_compile *c, bool allocate)
{
struct brw_compile *p = &c->func;
GLuint i;
{
struct brw_reg a0 = offset(c->vert[0], i);
GLushort pc, pc_persp, pc_linear;
- GLboolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+ bool last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
if (pc_persp)
{
{
saveflag = p->flag_value;
brw_push_insn_state(p);
- brw_emit_tri_setup( c, GL_FALSE );
+ brw_emit_tri_setup( c, false );
brw_pop_insn_state(p);
p->flag_value = saveflag;
/* note - thread killed in subroutine, so must
{
saveflag = p->flag_value;
brw_push_insn_state(p);
- brw_emit_line_setup( c, GL_FALSE );
+ brw_emit_line_setup( c, false );
brw_pop_insn_state(p);
p->flag_value = saveflag;
/* note - thread killed in subroutine */
{
saveflag = p->flag_value;
brw_push_insn_state(p);
- brw_emit_point_sprite_setup( c, GL_FALSE );
+ brw_emit_point_sprite_setup( c, false );
brw_pop_insn_state(p);
p->flag_value = saveflag;
}
brw_land_fwd_jump(p, jmp);
- brw_emit_point_setup( c, GL_FALSE );
+ brw_emit_point_setup( c, false );
}
const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
struct brw_sf_viewport *sfv;
GLfloat y_scale, y_bias;
- const GLboolean render_to_fbo = (ctx->DrawBuffer->Name != 0);
+ const bool render_to_fbo = (ctx->DrawBuffer->Name != 0);
const GLfloat *v = ctx->Viewport._WindowMap.m;
sfv = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
}
if (!_mesa_ir_link_shader(ctx, prog))
- return GL_FALSE;
+ return false;
if (!brw_shader_precompile(ctx, prog))
- return GL_FALSE;
+ return false;
- return GL_TRUE;
+ return true;
}
if ((state->mesa | state->cache | state->brw) == 0)
return;
- brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */
+ brw->intel.Fallback = false; /* boolean, not bitfield */
/* do prepare stage for all atoms */
for (i = 0; i < num_atoms; i++) {
};
-static GLboolean check_urb_layout( struct brw_context *brw )
+static bool check_urb_layout(struct brw_context *brw)
{
brw->urb.vs_start = 0;
brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize;
src_reg orig_src,
int base_offset);
- GLboolean try_emit_sat(ir_expression *ir);
+ bool try_emit_sat(ir_expression *ir);
bool process_move_condition(ir_rvalue *ir);
vec4_visitor v(c, prog, shader);
if (!v.run()) {
- prog->LinkStatus = GL_FALSE;
+ prog->LinkStatus = false;
ralloc_strcat(&prog->InfoLog, v.fail_msg);
return false;
}
}
}
-GLboolean
+bool
vec4_visitor::try_emit_sat(ir_expression *ir)
{
ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
if (0) {
_mesa_fprint_program_opt(stdout, &c.vp->program.Base, PROG_PRINT_DEBUG,
- GL_TRUE);
+ true);
}
/* Emit GEN4 code.
struct brw_reg stack;
struct {
- GLboolean used_in_src;
+ bool used_in_src;
struct brw_reg reg;
} output_regs[128];
struct brw_reg reg;
} current_const[3];
- GLboolean needs_stack;
+ bool needs_stack;
};
bool brw_vs_emit(struct gl_shader_program *prog, struct brw_vs_compile *c);
* are promoted to [0,0,0,1] for the purposes of this analysis.
*/
struct tracker {
- GLboolean twoside;
+ bool twoside;
GLubyte active[PROGRAM_OUTPUT+1][MAX_PROGRAM_TEMPS];
GLbitfield size_masks[4]; /**< one bit per fragment program input attrib */
};
/* Return the SrcReg index of the channels that can be immediate float operands
* instead of usage of PROGRAM_CONSTANT values through push/pull.
*/
-static GLboolean
+static bool
brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
{
int opcode_array[] = {
*/
if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
if (arg == 1 || arg == 2)
- return GL_TRUE;
+ return true;
}
if (opcode > ARRAY_SIZE(opcode_array))
- return GL_FALSE;
+ return false;
return arg == opcode_array[opcode] - 1;
}
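
For readers without the elided opcode_array in view: the predicate above answers whether a given source slot of a Mesa IR instruction may be folded into an immediate float instead of being fetched through the push/pull constant path. A hedged, standalone re-statement of the rule, assuming (as the final return suggests) that opcode_array holds each opcode's source count so that only the last slot qualifies in the general case:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical restatement of brw_vs_arg_can_be_immediate(), with the
 * opcode table reduced to an explicit argument count.
 */
static bool arg_can_be_immediate_sketch(int nr_args, bool is_mad_or_lrp,
                                        int arg)
{
   if (is_mad_or_lrp && (arg == 1 || arg == 2))
      return true;
   return arg == nr_args - 1;   /* only the last source otherwise */
}

int main(void)
{
   assert(arg_can_be_immediate_sketch(3, true, 1));   /* MAD src1 */
   assert(arg_can_be_immediate_sketch(2, false, 1));  /* ADD src1 */
   assert(!arg_can_be_immediate_sketch(2, false, 0)); /* ADD src0 */
   return 0;
}
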
*/
if (c->vp->program.Base.Parameters->NumParameters +
c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
- c->vp->use_const_buffer = GL_TRUE;
+ c->vp->use_const_buffer = true;
else
- c->vp->use_const_buffer = GL_FALSE;
+ c->vp->use_const_buffer = false;
/*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/
}
if (inst->SrcReg[arg].RelAddr) {
- c->vp->use_const_buffer = GL_TRUE;
+ c->vp->use_const_buffer = true;
continue;
}
* case) we need them all in place anyway.
*/
if (constant == max_constant)
- c->vp->use_const_buffer = GL_TRUE;
+ c->vp->use_const_buffer = true;
/* Set up the references to the pull parameters if present. This backend
* uses a 1:1 mapping from Mesa IR's index to location in the pull constant
*/
struct brw_compile *p = &c->func;
struct brw_reg tmp = dst;
- GLboolean need_tmp = GL_FALSE;
+ bool need_tmp = false;
if (dst.file != BRW_GENERAL_REGISTER_FILE ||
dst.dw1.bits.writemask != 0xf)
- need_tmp = GL_TRUE;
+ need_tmp = true;
if (need_tmp)
tmp = get_tmp(c);
{
struct brw_compile *p = &c->func;
struct brw_reg tmp = dst;
- GLboolean need_tmp = GL_FALSE;
+ bool need_tmp = false;
if (dst.file != BRW_GENERAL_REGISTER_FILE ||
dst.dw1.bits.writemask != 0xf)
- need_tmp = GL_TRUE;
+ need_tmp = true;
if (need_tmp)
tmp = get_tmp(c);
struct brw_reg tmp = dst;
struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
- GLboolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
+ bool need_tmp = (dst.dw1.bits.writemask != 0xf ||
dst.file != BRW_GENERAL_REGISTER_FILE);
if (need_tmp) {
{
struct brw_compile *p = &c->func;
struct brw_reg tmp = dst;
- GLboolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
+ bool need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
if (need_tmp)
tmp = get_tmp(c);
{
const GLuint file = inst->SrcReg[argIndex].File;
const GLint index = inst->SrcReg[argIndex].Index;
- const GLboolean relAddr = inst->SrcReg[argIndex].RelAddr;
+ const bool relAddr = inst->SrcReg[argIndex].RelAddr;
if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
const struct prog_src_register *src = &inst->SrcReg[argIndex];
GLuint ones_mask = 0;
GLuint src_mask = 0;
GLubyte src_swz[4];
- GLboolean need_tmp = (src.Negate &&
+ bool need_tmp = (src.Negate &&
dst.file != BRW_GENERAL_REGISTER_FILE);
struct brw_reg tmp = dst;
GLuint i;
}
}
-static GLboolean
+static bool
accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
{
struct brw_compile *p = &c->func;
struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];
if (p->nr_insn == 0)
- return GL_FALSE;
+ return false;
if (val.address_mode != BRW_ADDRESS_DIRECT)
- return GL_FALSE;
+ return false;
if (val.negate || val.abs)
- return GL_FALSE;
+ return false;
switch (prev_insn->header.opcode) {
case BRW_OPCODE_MOV:
prev_insn->bits1.da1.dest_reg_nr == val.nr &&
prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
prev_insn->bits1.da16.dest_writemask == 0xf)
- return GL_TRUE;
+ return true;
else
- return GL_FALSE;
+ return false;
default:
- return GL_FALSE;
+ return false;
}
}
if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
printf("vs-mesa:\n");
_mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
- GL_TRUE);
+ true);
printf("\n");
}
GLuint index = src->Index;
GLuint file = src->File;
if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
- c->output_regs[index].used_in_src = GL_TRUE;
+ c->output_regs[index].used_in_src = true;
}
switch (inst->Opcode) {
case OPCODE_CAL:
case OPCODE_RET:
- c->needs_stack = GL_TRUE;
+ c->needs_stack = true;
break;
default:
break;
if (c->fp->program.Base.OutputsWritten &
BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
- c->source_depth_to_render_target = GL_TRUE;
- c->computes_depth = GL_TRUE;
+ c->source_depth_to_render_target = true;
+ c->computes_depth = true;
}
} else {
brw_wm_lookup_iz(intel, c);
GLuint cur_inst; /**< index of current instruction */
- GLboolean out_of_regs; /**< ran out of GRF registers? */
+ bool out_of_regs; /**< ran out of GRF registers? */
/** Mapping from Mesa registers to hardware registers */
struct {
- GLboolean inited;
+ bool inited;
struct brw_reg reg;
} wm_regs[NUM_FILES][256][4];
- GLboolean used_grf[BRW_WM_MAX_GRF];
+ bool used_grf[BRW_WM_MAX_GRF];
GLuint first_free_grf;
struct brw_reg stack;
struct brw_reg emit_mask_reg;
void brw_wm_pass1( struct brw_wm_compile *c );
void brw_wm_pass2( struct brw_wm_compile *c );
void brw_wm_emit( struct brw_wm_compile *c );
-GLboolean brw_wm_arg_can_be_immediate(enum prog_opcode, int arg);
+bool brw_wm_arg_can_be_immediate(enum prog_opcode, int arg);
void brw_wm_print_value( struct brw_wm_compile *c,
struct brw_wm_value *value );
void emit_ddxy(struct brw_compile *p,
const struct brw_reg *dst,
GLuint mask,
- GLboolean is_ddx,
+ bool is_ddx,
const struct brw_reg *arg0);
void emit_delta_xy(struct brw_compile *p,
const struct brw_reg *dst,
struct brw_reg depth_payload,
GLuint tex_idx,
GLuint sampler,
- GLboolean shadow);
+ bool shadow);
void emit_txb(struct brw_wm_compile *c,
struct brw_reg *dst,
GLuint dst_flags,
#include "brw_context.h"
#include "brw_wm.h"
-static GLboolean can_do_pln(struct intel_context *intel,
- const struct brw_reg *deltas)
+static bool
+can_do_pln(struct intel_context *intel, const struct brw_reg *deltas)
{
struct brw_context *brw = brw_context(&intel->ctx);
if (!brw->has_pln)
- return GL_FALSE;
+ return false;
if (deltas[1].nr != deltas[0].nr + 1)
- return GL_FALSE;
+ return false;
if (intel->gen < 6 && ((deltas[0].nr & 1) != 0))
- return GL_FALSE;
+ return false;
- return GL_TRUE;
+ return true;
}
/* Return the SrcReg index of the channels that can be immediate float operands
* instead of usage of PROGRAM_CONSTANT values through push/pull.
*/
-GLboolean
+bool
brw_wm_arg_can_be_immediate(enum prog_opcode opcode, int arg)
{
int opcode_array[] = {
*/
if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
if (arg == 1 || arg == 2)
- return GL_TRUE;
+ return true;
}
if (opcode > ARRAY_SIZE(opcode_array))
- return GL_FALSE;
+ return false;
return arg == opcode_array[opcode] - 1;
}
void emit_ddxy(struct brw_compile *p,
const struct brw_reg *dst,
GLuint mask,
- GLboolean is_ddx,
+ bool is_ddx,
const struct brw_reg *arg0)
{
int i;
struct brw_reg depth_payload,
GLuint tex_idx,
GLuint sampler,
- GLboolean shadow)
+ bool shadow)
{
struct brw_compile *p = &c->func;
struct intel_context *intel = &p->brw->intel;
nr,
0,
eot,
- GL_TRUE);
+ true);
}
break;
case OPCODE_DDX:
- emit_ddxy(p, dst, dst_flags, GL_TRUE, args[0]);
+ emit_ddxy(p, dst, dst_flags, true, args[0]);
break;
case OPCODE_DDY:
- emit_ddxy(p, dst, dst_flags, GL_FALSE, args[0]);
+ emit_ddxy(p, dst, dst_flags, false, args[0]);
break;
case OPCODE_DP2:
return src_reg(PROGRAM_UNDEFINED, 0);
}
-static GLboolean src_is_undef(struct prog_src_register src)
+static bool src_is_undef(struct prog_src_register src)
{
return src.File == PROGRAM_UNDEFINED;
}
*/
if (c->key.yuvtex_mask & (1 << unit)) {
/* convert ycbcr to RGBA */
- GLboolean swap_uv = c->key.yuvtex_swap_mask & (1<<unit);
+ bool swap_uv = c->key.yuvtex_swap_mask & (1<<unit);
/*
CONST C0 = { -.5, -.0625, -.5, 1.164 }
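
The constant spells out the ITU-R BT.601 video-range bias terms: -0.0625 = -16/256 removes the luma footroom, -0.5 recenters the chroma channels, and 1.164 ≈ 255/219 rescales luma. A hedged CPU-side sketch of the conversion the shader performs (the chroma gains below are the standard BT.601 values and are an assumption about constants elided from this hunk; swap_uv above only exchanges the Cb/Cr sources):

/* Minimal restatement of the YCbCr -> RGB math, video range, all
 * channels normalized to [0, 1].
 */
static void ycbcr_to_rgb_sketch(float y, float cb, float cr,
                                float *r, float *g, float *b)
{
   const float ys = 1.164f * (y - 0.0625f);  /* bias and rescale luma */
   cb -= 0.5f;                               /* recenter chroma */
   cr -= 0.5f;
   *r = ys + 1.596f * cr;
   *g = ys - 0.813f * cr - 0.391f * cb;
   *b = ys + 2.018f * cb;
}
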
/**
* Check if the given TXP instruction really needs the divide-by-W step.
*/
-static GLboolean projtex( struct brw_wm_compile *c,
- const struct prog_instruction *inst )
+static bool
+projtex(struct brw_wm_compile *c, const struct prog_instruction *inst)
{
const struct prog_src_register src = inst->SrcReg[0];
- GLboolean retVal;
+ bool retVal;
assert(inst->Opcode == OPCODE_TXP);
* user-provided fragment programs anyway:
*/
if (inst->TexSrcTarget == TEXTURE_CUBE_INDEX)
- retVal = GL_FALSE; /* ut2004 gun rendering !?! */
+ retVal = false; /* ut2004 gun rendering !?! */
else if (src.File == PROGRAM_INPUT &&
GET_SWZ(src.Swizzle, W) == W &&
(c->key.proj_attrib_mask & (1 << src.Index)) == 0)
- retVal = GL_FALSE;
+ retVal = false;
else
- retVal = GL_TRUE;
+ retVal = true;
return retVal;
}
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("pre-fp:\n");
_mesa_fprint_program_opt(stdout, &fp->program.Base, PROG_PRINT_DEBUG,
- GL_TRUE);
+ true);
printf("\n");
}
case OPCODE_RSQ:
out = emit_scalar_insn(c, inst);
- out->SrcReg[0].Abs = GL_TRUE;
+ out->SrcReg[0].Abs = true;
break;
case OPCODE_TEX:
struct brw_wm_compile *c)
{
GLuint reg = 2;
- GLboolean kill_stats_promoted_workaround = GL_FALSE;
+ bool kill_stats_promoted_workaround = false;
int lookup = c->key.iz_lookup;
bool uses_depth = (c->fp->program.Base.InputsRead &
(1 << FRAG_ATTRIB_WPOS)) != 0;
if (c->key.stats_wm &&
(lookup & IZ_PS_KILL_ALPHATEST_BIT) &&
wm_iz_table[lookup].mode == P) {
- kill_stats_promoted_workaround = GL_TRUE;
+ kill_stats_promoted_workaround = true;
}
if (lookup & IZ_PS_COMPUTES_DEPTH_BIT)
{
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
- const GLboolean render_to_fbo = (ctx->DrawBuffer->Name != 0);
+ const bool render_to_fbo = (ctx->DrawBuffer->Name != 0);
struct gen6_scissor_rect *scissor;
uint32_t scissor_state_offset;
uint32_t dw1, dw2, dw3, dw4, dw16, dw17;
int i;
/* _NEW_BUFFER */
- GLboolean render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0;
+ bool render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0;
int attr = 0, input_index = 0;
int urb_entry_read_offset = 1;
float point_size;
const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
struct brw_sf_viewport *sfv;
GLfloat y_scale, y_bias;
- const GLboolean render_to_fbo = (ctx->DrawBuffer->Name != 0);
+ const bool render_to_fbo = (ctx->DrawBuffer->Name != 0);
const GLfloat *v = ctx->Viewport._WindowMap.m;
sfv = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
uint32_t dw1 = GEN6_CLIP_STATISTICS_ENABLE;
/* _NEW_BUFFERS */
- GLboolean render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0;
+ bool render_to_fbo = brw->intel.ctx.DrawBuffer->Name != 0;
dw1 |= GEN7_CLIP_EARLY_CULL;
struct gl_context *ctx = &brw->intel.ctx;
const GLfloat depth_scale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
GLfloat y_scale, y_bias;
- const GLboolean render_to_fbo = (ctx->DrawBuffer->Name != 0);
+ const bool render_to_fbo = (ctx->DrawBuffer->Name != 0);
const GLfloat *v = ctx->Viewport._WindowMap.m;
struct gen7_sf_clip_viewport *vp;
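
render_to_fbo recurs throughout these state-emission hunks because window-system framebuffers are stored y-flipped relative to GL's bottom-up convention, while user FBOs (DrawBuffer->Name != 0) are not. A hedged sketch of the Y transform choice the viewport atoms make (consistent with the gen6 code above; treat the exact bias as an assumption):

#include <stdbool.h>

/* Hypothetical helper: negate and bias Y when drawing to the window
 * system so GL's bottom-up coordinates land in the top-down buffer.
 */
static void compute_y_transform(bool render_to_fbo, float buffer_height,
                                float *y_scale, float *y_bias)
{
   if (render_to_fbo) {
      *y_scale = 1.0f;
      *y_bias = 0.0f;
   } else {
      *y_scale = -1.0f;
      *y_bias = buffer_height;
   }
}
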
drm_intel_bo_map(batch->bo, false);
intel_decode(batch->bo->virtual, batch->used,
batch->bo->offset,
- intel->intelScreen->deviceID, GL_TRUE);
+ intel->intelScreen->deviceID, true);
drm_intel_bo_unmap(batch->bo);
if (intel->vtbl.debug_batch != NULL)
/* This is the only way buffers get added to the validate list.
*/
-GLboolean
+bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains, uint32_t write_domain,
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
- return GL_TRUE;
+ return true;
}
-GLboolean
+bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
*/
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
- return GL_TRUE;
+ return true;
}
void
void intel_batchbuffer_data(struct intel_context *intel,
const void *data, GLuint bytes, bool is_blit);
-GLboolean intel_batchbuffer_emit_reloc(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
-GLboolean intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
drm_intel_bo *buffer,
uint32_t read_domains,
uint32_t write_domain,
/* Copy BitBlt
*/
-GLboolean
+bool
intelEmitCopyBlit(struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
if (dst_tiling != I915_TILING_NONE) {
if (dst_offset & 4095)
- return GL_FALSE;
+ return false;
if (dst_tiling == I915_TILING_Y)
- return GL_FALSE;
+ return false;
}
if (src_tiling != I915_TILING_NONE) {
if (src_offset & 4095)
- return GL_FALSE;
+ return false;
if (src_tiling == I915_TILING_Y)
- return GL_FALSE;
+ return false;
}
/* do space check before going any further */
} while (pass < 2);
if (pass >= 2)
- return GL_FALSE;
+ return false;
intel_batchbuffer_require_space(intel, 8 * 4, true);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
break;
default:
- return GL_FALSE;
+ return false;
}
#ifndef I915
#endif
if (dst_y2 <= dst_y || dst_x2 <= dst_x) {
- return GL_TRUE;
+ return true;
}
assert(dst_x < dst_x2);
intel_batchbuffer_emit_mi_flush(intel);
- return GL_TRUE;
+ return true;
}
mask &= (1 << BUFFER_COUNT) - 1;
while (mask) {
GLuint buf = _mesa_ffs(mask) - 1;
- GLboolean is_depth_stencil = buf == BUFFER_DEPTH || buf == BUFFER_STENCIL;
+ bool is_depth_stencil = buf == BUFFER_DEPTH || buf == BUFFER_STENCIL;
struct intel_renderbuffer *irb;
int x1, y1, x2, y2;
uint32_t clear_val;
return fail_mask;
}
-GLboolean
+bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
if (dst_tiling != I915_TILING_NONE) {
if (dst_offset & 4095)
- return GL_FALSE;
+ return false;
if (dst_tiling == I915_TILING_Y)
- return GL_FALSE;
+ return false;
}
assert( logic_op - GL_CLEAR >= 0 );
assert(dst_pitch > 0);
if (w < 0 || h < 0)
- return GL_TRUE;
+ return true;
dst_pitch *= cpp;
intel_batchbuffer_emit_mi_flush(intel);
- return GL_TRUE;
+ return true;
}
/* We don't have a memmove-type blit like some other hardware, so we'll do a
unsigned int size)
{
GLuint pitch, height;
- GLboolean ok;
+ bool ok;
/* The pitch given to the GPU must be DWORD aligned, and
* we want width to match pitch. Max width is (1 << 15) - 1,
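
Since the comment is cut off by the hunk: the idea is to reshape a linear byte range into a rectangle the blitter accepts. A hedged sketch of the shape computation, under the stated DWORD-alignment rule and the blitter's 15-bit width field (names are illustrative; the real code would still have to blit the size % pitch remainder separately):

/* Pick the widest DWORD-aligned pitch within the blitter's limit and
 * derive how many full rows of that pitch cover `size` bytes.
 */
static void choose_linear_blit_shape(unsigned size,
                                     unsigned *pitch, unsigned *height)
{
   unsigned p = size < 32767 ? size : 32767;  /* 15-bit width field */
   p &= ~3u;                                  /* DWORD aligned */
   if (p == 0)
      p = 4;
   *pitch = p;
   *height = size / p;            /* tail of size % p handled elsewhere */
}
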
extern GLbitfield intelClearWithBlit(struct gl_context * ctx, GLbitfield mask);
-GLboolean
+bool
intelEmitCopyBlit(struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
GLshort w, GLshort h,
GLenum logicop );
-GLboolean
+bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
GLuint cpp,
GLubyte *src_bits, GLuint src_size,
* previously stored in the buffer object is lost. If data is NULL,
* memory will be allocated, but no copy will occur.
* Called via ctx->Driver.BufferData().
- * \return GL_TRUE for success, GL_FALSE if out of memory
+ * \return true for success, false if out of memory
*/
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
if (intel_obj->sys_buffer != NULL) {
if (data != NULL)
memcpy(intel_obj->sys_buffer, data, size);
- return GL_TRUE;
+ return true;
}
}
intel_bufferobj_alloc_buffer(intel, intel_obj);
if (!intel_obj->buffer)
- return GL_FALSE;
+ return false;
if (data != NULL)
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
}
- return GL_TRUE;
+ return true;
}
length, 64);
if (!(access & GL_MAP_READ_BIT)) {
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
- intel_obj->mapped_gtt = GL_TRUE;
+ intel_obj->mapped_gtt = true;
} else {
drm_intel_bo_map(intel_obj->range_map_bo,
(access & GL_MAP_WRITE_BIT) != 0);
- intel_obj->mapped_gtt = GL_FALSE;
+ intel_obj->mapped_gtt = false;
}
obj->Pointer = intel_obj->range_map_bo->virtual;
}
if (!(access & GL_MAP_READ_BIT)) {
drm_intel_gem_bo_map_gtt(intel_obj->buffer);
- intel_obj->mapped_gtt = GL_TRUE;
+ intel_obj->mapped_gtt = true;
} else {
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
- intel_obj->mapped_gtt = GL_FALSE;
+ intel_obj->mapped_gtt = false;
}
obj->Pointer = intel_obj->buffer->virtual + offset;
obj->Offset = 0;
obj->Length = 0;
- return GL_TRUE;
+ return true;
}
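
The mapped_gtt flag recorded in these hunks exists so the unmap path can release the mapping with the matching libdrm call. A minimal sketch of the consuming side (both entry points exist in libdrm's intel_bufmgr.h; the wrapper itself is illustrative):

#include <stdbool.h>
#include "intel_bufmgr.h"

/* Illustrative unmap: a GTT mapping must be torn down with the GTT
 * variant, a CPU mapping with the plain one.
 */
static void unmap_intel_bo(drm_intel_bo *bo, bool mapped_gtt)
{
   if (mapped_gtt)
      drm_intel_gem_bo_unmap_gtt(bo);
   else
      drm_intel_bo_unmap(bo);
}
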
drm_intel_bo *
unsigned int range_map_offset;
GLsizei range_map_size;
- GLboolean mapped_gtt;
- GLboolean source;
+ bool mapped_gtt;
+ bool source;
};
/* drawing to window system buffer */
if (fb->_NumColorDrawBuffers > 0) {
if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
- intel->front_buffer_dirty = GL_TRUE;
+ intel->front_buffer_dirty = true;
}
}
}
{
if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
struct intel_context *const intel = intel_context(ctx);
- const GLboolean was_front_buffer_rendering =
+ const bool was_front_buffer_rendering =
intel->is_front_buffer_rendering;
intel->is_front_buffer_rendering = (mode == GL_FRONT_LEFT)
{
if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
struct intel_context *const intel = intel_context(ctx);
- const GLboolean was_front_buffer_reading =
+ const bool was_front_buffer_reading =
intel->is_front_buffer_reading;
intel->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
return;
if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
- intel->front_buffer_dirty = GL_TRUE;
+ intel->front_buffer_dirty = true;
}
if (0)
/* We set the dirty bit in intel_prepare_render() if we're
* front buffer rendering once we get there.
*/
- intel->front_buffer_dirty = GL_FALSE;
+ intel->front_buffer_dirty = false;
}
}
}
* mark it as dirty here.
*/
if (intel->is_front_buffer_rendering)
- intel->front_buffer_dirty = GL_TRUE;
+ intel->front_buffer_dirty = true;
/* Wait for the swapbuffers before the one we just emitted, so we
* don't get too many swaps outstanding for apps that are GPU-heavy
drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;
- intel->need_throttle = GL_FALSE;
+ intel->need_throttle = false;
}
}
intel_flush(ctx);
intel_flush_front(ctx);
if (intel->is_front_buffer_rendering)
- intel->need_throttle = GL_TRUE;
+ intel->need_throttle = true;
}
void
intel_init_syncobj_functions(functions);
}
-GLboolean
+bool
intelInitContext(struct intel_context *intel,
int api,
const struct gl_config * mesaVis,
/* we can't do anything without a connection to the device */
if (intelScreen->bufmgr == NULL)
- return GL_FALSE;
+ return false;
/* Can't rely on invalidate events, fall back to glViewport hack */
if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
functions, (void *) intel)) {
printf("%s: failed to init mesa context\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
driContextPriv->driverPrivate = intel;
memset(&ctx->TextureFormatSupported, 0,
sizeof(ctx->TextureFormatSupported));
- ctx->TextureFormatSupported[MESA_FORMAT_ARGB8888] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_ARGB8888] = true;
if (devID != PCI_CHIP_I830_M && devID != PCI_CHIP_845_G)
- ctx->TextureFormatSupported[MESA_FORMAT_XRGB8888] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_ARGB4444] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_ARGB1555] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RGB565] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_L8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_A8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_I8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_AL88] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_XRGB8888] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_ARGB4444] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_ARGB1555] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGB565] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_L8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_A8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_I8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_AL88] = true;
if (intel->gen >= 4)
- ctx->TextureFormatSupported[MESA_FORMAT_AL1616] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_AL1616] = true;
/* Depth and stencil */
- ctx->TextureFormatSupported[MESA_FORMAT_S8_Z24] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_X8_Z24] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_S8_Z24] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_X8_Z24] = true;
ctx->TextureFormatSupported[MESA_FORMAT_S8] = intel->has_separate_stencil;
/*
* combo that actually works, so this can probably be re-enabled.
*/
/*
- ctx->TextureFormatSupported[MESA_FORMAT_Z16] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_Z24] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_Z16] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_Z24] = true;
*/
/* ctx->Extensions.MESA_ycbcr_texture */
- ctx->TextureFormatSupported[MESA_FORMAT_YCBCR] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_YCBCR_REV] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_YCBCR] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_YCBCR_REV] = true;
/* GL_3DFX_texture_compression_FXT1 */
- ctx->TextureFormatSupported[MESA_FORMAT_RGB_FXT1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RGBA_FXT1] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGB_FXT1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGBA_FXT1] = true;
/* GL_EXT_texture_compression_s3tc */
- ctx->TextureFormatSupported[MESA_FORMAT_RGB_DXT1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT3] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT5] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGB_DXT1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT3] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGBA_DXT5] = true;
#ifndef I915
/* GL_ARB_texture_compression_rgtc */
- ctx->TextureFormatSupported[MESA_FORMAT_RED_RGTC1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RED_RGTC1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RG_RGTC2] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RG_RGTC2] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_RED_RGTC1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RED_RGTC1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RG_RGTC2] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RG_RGTC2] = true;
/* GL_ARB_texture_rg */
- ctx->TextureFormatSupported[MESA_FORMAT_R8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_R16] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RG88] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RG1616] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_R8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_R16] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RG88] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RG1616] = true;
/* GL_MESA_texture_signed_rgba / GL_EXT_texture_snorm */
- ctx->TextureFormatSupported[MESA_FORMAT_DUDV8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RGBA8888_REV] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_R8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RG88_REV] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_R16] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_GR1616] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_DUDV8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RGBA8888_REV] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_R8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_RG88_REV] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_R16] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SIGNED_GR1616] = true;
/* GL_EXT_texture_sRGB */
- ctx->TextureFormatSupported[MESA_FORMAT_SARGB8] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_SARGB8] = true;
if (intel->gen >= 5 || intel->is_g4x)
- ctx->TextureFormatSupported[MESA_FORMAT_SRGB_DXT1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT1] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT3] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT5] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_SRGB_DXT1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT1] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT3] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SRGBA_DXT5] = true;
if (intel->gen >= 5 || intel->is_g4x) {
- ctx->TextureFormatSupported[MESA_FORMAT_SL8] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_SLA8] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_SL8] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_SLA8] = true;
}
#ifdef TEXTURE_FLOAT_ENABLED
- ctx->TextureFormatSupported[MESA_FORMAT_RGBA_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_RG_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_R_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_INTENSITY_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_LUMINANCE_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_ALPHA_FLOAT32] = GL_TRUE;
- ctx->TextureFormatSupported[MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGBA_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_RG_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_R_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_INTENSITY_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_LUMINANCE_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_ALPHA_FLOAT32] = true;
+ ctx->TextureFormatSupported[MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32] = true;
/* GL_EXT_texture_shared_exponent */
- ctx->TextureFormatSupported[MESA_FORMAT_RGB9_E5_FLOAT] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_RGB9_E5_FLOAT] = true;
/* GL_EXT_packed_float */
- ctx->TextureFormatSupported[MESA_FORMAT_R11_G11_B10_FLOAT] = GL_TRUE;
+ ctx->TextureFormatSupported[MESA_FORMAT_R11_G11_B10_FLOAT] = true;
#endif
#endif /* !I915 */
_mesa_init_point(ctx);
if (intel->gen >= 4) {
- ctx->Const.sRGBCapable = GL_TRUE;
+ ctx->Const.sRGBCapable = true;
if (MAX_WIDTH > 8192)
ctx->Const.MaxRenderbufferSize = 8192;
} else {
_swsetup_CreateContext(ctx);
/* Configure swrast to match hardware characteristics: */
- _swrast_allow_pixel_fog(ctx, GL_FALSE);
- _swrast_allow_vertex_fog(ctx, GL_TRUE);
+ _swrast_allow_pixel_fog(ctx, false);
+ _swrast_allow_vertex_fog(ctx, true);
_mesa_meta_init(ctx);
INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
if (INTEL_DEBUG & DEBUG_BUFMGR)
- dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
+ dri_bufmgr_set_debug(intel->bufmgr, true);
intel_batchbuffer_init(intel);
intel->always_flush_cache = 1;
}
- return GL_TRUE;
+ return true;
}
void
/* Unset current context and dispatch table */
_mesa_make_current(NULL, NULL, NULL);
- return GL_TRUE;
+ return true;
}
GLboolean
_mesa_make_current(NULL, NULL, NULL);
}
- return GL_TRUE;
+ return true;
}
/**
/*@}*/
extern void intelFallback(struct intel_context *intel, GLbitfield bit,
- GLboolean mode);
+ bool mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
void (*reduced_primitive_state) (struct intel_context * intel,
GLenum rprim);
- GLboolean (*check_vertex_size) (struct intel_context * intel,
+ bool (*check_vertex_size) (struct intel_context * intel,
GLuint expected);
void (*invalidate_state) (struct intel_context *intel,
GLuint new_state);
*/
int gen;
int gt;
- GLboolean needs_ff_sync;
- GLboolean is_g4x;
- GLboolean is_945;
- GLboolean has_separate_stencil;
- GLboolean must_use_separate_stencil;
- GLboolean has_hiz;
+ bool needs_ff_sync;
+ bool is_g4x;
+ bool is_945;
+ bool has_separate_stencil;
+ bool must_use_separate_stencil;
+ bool has_hiz;
int urb_size;
} batch;
drm_intel_bo *first_post_swapbuffers_batch;
- GLboolean need_throttle;
- GLboolean no_batch_wrap;
+ bool need_throttle;
+ bool no_batch_wrap;
bool tnl_pipeline_running; /**< Set while i915's _tnl_run_pipeline. */
struct
GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
- GLboolean hw_stencil;
- GLboolean hw_stipple;
- GLboolean depth_buffer_is_float;
- GLboolean no_rast;
- GLboolean always_flush_batch;
- GLboolean always_flush_cache;
+ bool hw_stencil;
+ bool hw_stipple;
+ bool depth_buffer_is_float;
+ bool no_rast;
+ bool always_flush_batch;
+ bool always_flush_cache;
/* 0 - nonconformant, best performance;
* 1 - fallback to sw for known conformance bugs
* This is used in the DRI2 case to detect that glFlush should also copy
* the contents of the fake front buffer to the real front buffer.
*/
- GLboolean front_buffer_dirty;
+ bool front_buffer_dirty;
/**
* Track whether front-buffer rendering is currently enabled
* A separate flag is used to track this in order to support MRT more
* easily.
*/
- GLboolean is_front_buffer_rendering;
+ bool is_front_buffer_rendering;
/**
* Track whether front-buffer is the current read target.
*
* be set separately. The DRI2 fake front buffer must be referenced
* either way.
*/
- GLboolean is_front_buffer_reading;
+ bool is_front_buffer_reading;
/**
* Count of intel_regions that are mapped.
*/
int num_mapped_regions;
- GLboolean use_texture_tiling;
- GLboolean use_early_z;
+ bool use_texture_tiling;
+ bool use_early_z;
int driFd;
* intel_context.c:
*/
-extern GLboolean intelInitContext(struct intel_context *intel,
+extern bool intelInitContext(struct intel_context *intel,
int api,
const struct gl_config * mesaVis,
__DRIcontext * driContextPriv,
return (struct intel_context *) ctx;
}
-static INLINE GLboolean
+static INLINE bool
is_power_of_two(uint32_t value)
{
return (value & (value - 1)) == 0;
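
The test relies on the identity that subtracting 1 from a power of two flips exactly the bits below its single set bit, so the AND is zero only in that case (and for zero itself, which this helper also reports as a power of two):

#include <assert.h>

int main(void)
{
   assert((64u & 63u) == 0);      /* 0b1000000 & 0b0111111 */
   assert((24u & 23u) != 0);      /* 0b11000 & 0b10111: not a power */
   assert((0u & (0u - 1u)) == 0); /* zero passes the test too */
   return 0;
}
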
cpp * 2,
ALIGN(width, 64),
ALIGN((height + 1) / 2, 64),
- GL_TRUE);
+ true);
if (!irb->region)
return false;
} else {
irb->region = intel_region_alloc(intel->intelScreen, tiling, cpp,
- width, height, GL_TRUE);
+ width, height, true);
if (!irb->region)
return false;
irb->region->cpp,
irb->region->width,
irb->region->height,
- GL_TRUE);
+ true);
if (!irb->hiz_region) {
intel_region_release(&irb->region);
return false;
}
}
- return GL_TRUE;
+ return true;
}
rb->Height = height;
rb->InternalFormat = internalFormat;
- return GL_TRUE;
+ return true;
}
_mesa_resize_framebuffer(ctx, fb, width, height);
- fb->Initialized = GL_TRUE; /* XXX remove someday */
+ fb->Initialized = true; /* XXX remove someday */
if (fb->Name != 0) {
return;
GLenum internalFormat, GLuint width, GLuint height)
{
_mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
- return GL_FALSE;
+ return false;
}
/**
struct intel_renderbuffer *irb,
struct intel_texture_image *intel_image);
-static GLboolean
+static bool
intel_update_wrapper(struct gl_context *ctx, struct intel_renderbuffer *irb,
struct gl_texture_image *texImage)
{
if (!intel_span_supports_format(texImage->TexFormat)) {
DBG("Render to texture BAD FORMAT %s\n",
_mesa_get_format_name(texImage->TexFormat));
- return GL_FALSE;
+ return false;
} else {
DBG("Render to texture %s\n", _mesa_get_format_name(texImage->TexFormat));
}
_mesa_get_format_bytes(rb->Format),
rb->Width,
rb->Height,
- GL_TRUE);
+ true);
if (!intel_image->mt->hiz_region)
- return GL_FALSE;
+ return false;
}
/* Point the renderbuffer's hiz region to the texture's hiz region. */
intel_region_reference(&irb->hiz_region, intel_image->mt->hiz_region);
}
- return GL_TRUE;
+ return true;
}
irb->Base.RefCount);
intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
- intel_image->used_as_render_target = GL_TRUE;
+ intel_image->used_as_render_target = true;
#ifndef I915
if (need_tile_offset_workaround(brw_context(ctx), irb)) {
intel_image->base.Base.Level,
intel_image->base.Base.Level,
width, height, depth,
- GL_TRUE);
+ true);
intel_miptree_copy_teximage(intel, intel_image, new_mt);
intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
/* Flag that this image may now be validated into the object's miptree. */
if (intel_image)
- intel_image->used_as_render_target = GL_FALSE;
+ intel_image->used_as_render_target = false;
/* Since we've (probably) rendered to the texture and will (likely) use
* it in the texture domain later on in this batchbuffer, flush the
GLuint width0,
GLuint height0,
GLuint depth0,
- GLboolean expect_accelerated_upload)
+ bool expect_accelerated_upload)
{
struct intel_mipmap_tree *mt;
uint32_t tiling = I915_TILING_NONE;
*
* Not sure whether I want to pass gl_texture_image here.
*/
-GLboolean
+bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image)
{
/* Images with borders are never pulled into mipmap trees. */
if (image->Border)
- return GL_FALSE;
+ return false;
if (image->TexFormat != mt->format)
- return GL_FALSE;
+ return false;
intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
if (width != mt->level[level].width ||
height != mt->level[level].height ||
depth != mt->level[level].depth)
- return GL_FALSE;
+ return false;
- return GL_TRUE;
+ return true;
}
GLuint width0, height0, depth0; /**< Level zero image dimensions */
GLuint cpp;
- GLboolean compressed;
+ bool compressed;
/* Derived from the above:
*/
GLuint width0,
GLuint height0,
GLuint depth0,
- GLboolean expect_accelerated_upload);
+ bool expect_accelerated_upload);
struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
/* Check if an image fits an existing mipmap tree layout
*/
-GLboolean intel_miptree_match_image(struct intel_mipmap_tree *mt,
+bool intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image);
void
#define FILE_DEBUG_FLAG DEBUG_PIXEL
static GLenum
-effective_func(GLenum func, GLboolean src_alpha_is_one)
+effective_func(GLenum func, bool src_alpha_is_one)
{
if (src_alpha_is_one) {
if (func == GL_SRC_ALPHA)
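
effective_func() exists because a blit source whose alpha is known to be 1.0 everywhere (e.g. an XRGB surface) makes alpha-dependent blend factors collapse to constants, letting the blend check below still accept the blit path. The hunk shows only the first case; a hedged sketch of the full folding, consistent with GL blend semantics (enums from GL/gl.h):

#include <stdbool.h>
#include <GL/gl.h>

/* Assumed completion: with src alpha identically 1, GL_SRC_ALPHA scales
 * by one and GL_ONE_MINUS_SRC_ALPHA by zero.
 */
static GLenum effective_func_sketch(GLenum func, bool src_alpha_is_one)
{
   if (src_alpha_is_one) {
      if (func == GL_SRC_ALPHA)
         return GL_ONE;
      if (func == GL_ONE_MINUS_SRC_ALPHA)
         return GL_ZERO;
   }
   return func;
}
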
* Check if any fragment operations are in effect which might affect
* glDraw/CopyPixels.
*/
-GLboolean
-intel_check_blit_fragment_ops(struct gl_context * ctx, GLboolean src_alpha_is_one)
+bool
+intel_check_blit_fragment_ops(struct gl_context * ctx, bool src_alpha_is_one)
{
if (ctx->NewState)
_mesa_update_state(ctx);
if (ctx->FragmentProgram._Enabled) {
DBG("fallback due to fragment program\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Color.BlendEnabled &&
effective_func(ctx->Color.Blend[0].DstA, src_alpha_is_one) != GL_ZERO ||
ctx->Color.Blend[0].EquationA != GL_FUNC_ADD)) {
DBG("fallback due to blend\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Texture._EnabledUnits) {
DBG("fallback due to texturing\n");
- return GL_FALSE;
+ return false;
}
if (!(ctx->Color.ColorMask[0][0] &&
ctx->Color.ColorMask[0][2] &&
ctx->Color.ColorMask[0][3])) {
DBG("fallback due to color masking\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Color.AlphaEnabled) {
DBG("fallback due to alpha\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Depth.Test) {
DBG("fallback due to depth test\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Fog.Enabled) {
DBG("fallback due to fog\n");
- return GL_FALSE;
+ return false;
}
if (ctx->_ImageTransferState) {
DBG("fallback due to image transfer\n");
- return GL_FALSE;
+ return false;
}
if (ctx->Stencil._Enabled) {
DBG("fallback due to image stencil\n");
- return GL_FALSE;
+ return false;
}
if (ctx->RenderMode != GL_RENDER) {
DBG("fallback due to render mode\n");
- return GL_FALSE;
+ return false;
}
- return GL_TRUE;
+ return true;
}
/* The intel_region struct doesn't really do enough to capture the
* \param format as given to glDraw/ReadPixels
* \param type as given to glDraw/ReadPixels
*/
-GLboolean
+bool
intel_check_blit_format(struct intel_region * region,
GLenum format, GLenum type)
{
if (region->cpp == 4 &&
(type == GL_UNSIGNED_INT_8_8_8_8_REV ||
type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
- return GL_TRUE;
+ return true;
}
if (region->cpp == 2 &&
type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
- return GL_TRUE;
+ return true;
}
DBG("%s: bad format for blit (cpp %d, type %s format %s)\n",
__FUNCTION__, region->cpp,
_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
- return GL_FALSE;
+ return false;
}
void
#include "main/mtypes.h"
void intelInitPixelFuncs(struct dd_function_table *functions);
-GLboolean intel_check_blit_fragment_ops(struct gl_context * ctx,
- GLboolean src_alpha_is_one);
+bool intel_check_blit_fragment_ops(struct gl_context * ctx,
+ bool src_alpha_is_one);
-GLboolean intel_check_blit_format(struct intel_region *region,
+bool intel_check_blit_format(struct intel_region *region,
GLenum format, GLenum type);
return ADD_POINTERS(buf, bitmap);
}
-static GLboolean test_bit( const GLubyte *src, GLuint bit )
+static bool test_bit( const GLubyte *src, GLuint bit )
{
return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
}
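
test_bit() addresses the bitmap as a string of bits: byte bit/8, bit bit%8 within it. The caller below XORs the bit index with a mask of 7 when unpack->LsbFirst is unset, which flips the within-byte order to match glPixelStore's default MSB-first packing. A small worked example of that addressing:

#include <assert.h>

int main(void)
{
   const unsigned char src[1] = { 0x01 };   /* only bit 0 set */

   /* LsbFirst (mask 0): pixel 0 reads bit 0 directly. */
   assert((src[0 / 8] & (1 << (0 % 8))) != 0);

   /* Default MSB-first (mask 7): pixel 0 reads bit 0 ^ 7 == 7. */
   assert((src[7 / 8] & (1 << (7 % 8))) == 0);
   return 0;
}
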
GLuint w, GLuint h,
GLubyte *dest,
GLuint row_align,
- GLboolean invert)
+ bool invert)
{
GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
GLuint mask = unpack->LsbFirst ? 0 : 7;
/*
* Render a bitmap.
*/
-static GLboolean
+static bool
do_blit_bitmap( struct gl_context *ctx,
GLint dstx, GLint dsty,
GLsizei width, GLsizei height,
* It seems the blit Z coord is always 1.0 (the far plane) so fragments
* will likely be obscured by other, closer geometry.
*/
- return GL_FALSE;
+ return false;
}
intel_prepare_render(intel);
dst = intel_drawbuf_region(intel);
if (!dst)
- return GL_FALSE;
+ return false;
if (_mesa_is_bufferobj(unpack->BufferObj)) {
bitmap = map_pbo(ctx, width, height, unpack, bitmap);
if (bitmap == NULL)
- return GL_TRUE; /* even though this is an error, we're done */
+ return true; /* even though this is an error, we're done */
}
COPY_4V(tmpColor, ctx->Current.RasterColor);
color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
- return GL_FALSE;
+ return false;
/* Clip to buffer bounds and scissor. */
if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
w, h,
(GLubyte *)stipple,
8,
- fb->Name == 0 ? GL_TRUE : GL_FALSE) == 0)
+ fb->Name == 0) == 0)
continue;
if (!intelEmitImmediateColorExpandBlit(intel,
dsty + py,
w, h,
logic_op)) {
- return GL_FALSE;
+ return false;
}
}
}
intel_check_front_buffer_rendering(intel);
- return GL_TRUE;
+ return true;
}
* glCopyPixels. Differs from intel_check_blit_fragment_ops in that
* we allow Scissor.
*/
-static GLboolean
+static bool
intel_check_copypixel_blit_fragment_ops(struct gl_context * ctx)
{
if (ctx->NewState)
/**
* CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
*/
-static GLboolean
+static bool
do_blit_copypixels(struct gl_context * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint orig_dsty;
GLint orig_srcx;
GLint orig_srcy;
- GLboolean flip = GL_FALSE;
+ bool flip = false;
struct intel_renderbuffer *draw_irb = NULL;
struct intel_renderbuffer *read_irb = NULL;
case GL_COLOR:
if (fb->_NumColorDrawBuffers != 1) {
fallback_debug("glCopyPixels() fallback: MRT\n");
- return GL_FALSE;
+ return false;
}
draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
break;
case GL_DEPTH:
fallback_debug("glCopyPixels() fallback: GL_DEPTH\n");
- return GL_FALSE;
+ return false;
case GL_STENCIL:
fallback_debug("glCopyPixels() fallback: GL_STENCIL\n");
- return GL_FALSE;
+ return false;
default:
fallback_debug("glCopyPixels(): Unknown type\n");
- return GL_FALSE;
+ return false;
}
if (!draw_irb) {
fallback_debug("glCopyPixels() fallback: missing draw buffer\n");
- return GL_FALSE;
+ return false;
}
if (!read_irb) {
fallback_debug("glCopyPixels() fallback: missing read buffer\n");
- return GL_FALSE;
+ return false;
}
if (draw_irb->Base.Format != read_irb->Base.Format &&
fallback_debug("glCopyPixels() fallback: mismatched formats (%s -> %s\n",
_mesa_get_format_name(read_irb->Base.Format),
_mesa_get_format_name(draw_irb->Base.Format));
- return GL_FALSE;
+ return false;
}
/* Copypixels can be more than a straight copy. Ensure all the
*/
if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
- return GL_FALSE;
+ return false;
intel_prepare_render(intel);
ctx->Color.ColorLogicOpEnabled ?
ctx->Color.LogicOp : GL_COPY)) {
DBG("%s: blit failure\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
out:
intel_check_front_buffer_rendering(intel);
DBG("%s: success\n", __FUNCTION__);
- return GL_TRUE;
+ return true;
}
* any case.
*/
-static GLboolean
+static bool
do_blit_readpixels(struct gl_context * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
GLuint dst_offset;
GLuint rowLength;
drm_intel_bo *dst_buffer;
- GLboolean all;
+ bool all;
GLint dst_x, dst_y;
GLuint dirty;
DBG("%s\n", __FUNCTION__);
if (!src)
- return GL_FALSE;
+ return false;
if (!_mesa_is_bufferobj(pack->BufferObj)) {
/* PBO only for now:
*/
DBG("%s - not PBO\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
if (ctx->_ImageTransferState ||
!intel_check_blit_format(src, format, type)) {
DBG("%s - bad format for blit\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
DBG("%s: bad packing params\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
if (pack->RowLength > 0)
if (pack->Invert) {
DBG("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
- return GL_FALSE;
+ return false;
}
else {
if (ctx->ReadBuffer->Name == 0)
&dst_x, &dst_y,
&x, &y,
&width, &height)) {
- return GL_TRUE;
+ return true;
}
dirty = intel->front_buffer_dirty;
if (!intelEmitCopyBlit(intel,
src->cpp,
src->pitch, src->bo, 0, src->tiling,
- rowLength, dst_buffer, dst_offset, GL_FALSE,
+ rowLength, dst_buffer, dst_offset, false,
x, y,
dst_x, dst_y,
width, height,
GL_COPY)) {
- return GL_FALSE;
+ return false;
}
DBG("%s - DONE\n", __FUNCTION__);
- return GL_TRUE;
+ return true;
}
void
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
- GLboolean dirty;
+ bool dirty;
DBG("%s\n", __FUNCTION__);
if (region->tiling != I915_TILING_NONE)
drm_intel_gem_bo_map_gtt(region->bo);
else
- drm_intel_bo_map(region->bo, GL_TRUE);
+ drm_intel_bo_map(region->bo, true);
region->map = region->bo->virtual;
++intel->num_mapped_regions;
intel_region_alloc(struct intel_screen *screen,
uint32_t tiling,
GLuint cpp, GLuint width, GLuint height,
- GLboolean expect_accelerated_upload)
+ bool expect_accelerated_upload)
{
drm_intel_bo *buffer;
unsigned long flags = 0;
return region;
}
-GLboolean
+bool
intel_region_flink(struct intel_region *region, uint32_t *name)
{
if (region->name == 0) {
if (drm_intel_bo_flink(region->bo, &region->name))
- return GL_FALSE;
+ return false;
_mesa_HashInsert(region->screen->named_regions,
region->name, region);
*name = region->name;
- return GL_TRUE;
+ return true;
}
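
flink publishes a GEM global name for the region's buffer (cached here, with the region registered in a hash table so later lookups by name find the same object); that name is what __DRI_IMAGE_ATTRIB_NAME above hands out. A hedged sketch of the consuming side in another process, using the matching libdrm call (bufmgr setup and error handling elided):

#include "intel_bufmgr.h"

/* Illustrative consumer: open the buffer a flinked name refers to. */
static drm_intel_bo *
open_shared_bo(drm_intel_bufmgr *bufmgr, uint32_t name)
{
   return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}
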
struct intel_region *
/* Copy rectangular sub-regions. Need better logic about when to
* push buffers into AGP - will currently do so whenever possible.
*/
-GLboolean
+bool
intel_region_copy(struct intel_context *intel,
struct intel_region *dst,
GLuint dst_offset,
struct intel_region *src,
GLuint src_offset,
GLuint srcx, GLuint srcy, GLuint width, GLuint height,
- GLboolean flip,
+ bool flip,
GLenum logicop)
{
uint32_t src_pitch = src->pitch;
_DBG("%s\n", __FUNCTION__);
if (intel == NULL)
- return GL_FALSE;
+ return false;
assert(src->cpp == dst->cpp);
* stored in a drm_intel_bo.
*/
+#include <stdbool.h>
#include <xf86drm.h>
#include "main/mtypes.h"
uint32_t tiling,
GLuint cpp, GLuint width,
GLuint height,
- GLboolean expect_accelerated_upload);
+ bool expect_accelerated_upload);
struct intel_region *
intel_region_alloc_for_handle(struct intel_screen *screen,
GLuint width, GLuint height, GLuint pitch,
unsigned int handle, const char *name);
-GLboolean
+bool
intel_region_flink(struct intel_region *region, uint32_t *name);
void intel_region_reference(struct intel_region **dst,
/* Copy rectangular sub-regions
*/
-GLboolean
+bool
intel_region_copy(struct intel_context *intel,
struct intel_region *dest,
GLuint dest_offset,
struct intel_region *src,
GLuint src_offset,
GLuint srcx, GLuint srcy, GLuint width, GLuint height,
- GLboolean flip,
+ bool flip,
GLenum logicop);
void _mesa_copy_rect(GLubyte * dst,
if (intel->gen < 4)
INTEL_FIREVERTICES(intel);
- intel->need_throttle = GL_TRUE;
+ intel->need_throttle = true;
if (intel->batch.used)
intel_batchbuffer_flush(intel);
image->region =
intel_region_alloc(intelScreen, tiling,
- cpp, width, height, GL_TRUE);
+ cpp, width, height, true);
if (image->region == NULL) {
FREE(image);
return NULL;
switch (attrib) {
case __DRI_IMAGE_ATTRIB_STRIDE:
*value = image->region->pitch * image->region->cpp;
- return GL_TRUE;
+ return true;
case __DRI_IMAGE_ATTRIB_HANDLE:
*value = image->region->bo->handle;
- return GL_TRUE;
+ return true;
case __DRI_IMAGE_ATTRIB_NAME:
return intel_region_flink(image->region, (uint32_t *) value);
default:
- return GL_FALSE;
+ return false;
}
}
NULL
};
-static GLboolean
+static bool
intel_get_param(__DRIscreen *psp, int param, int *value)
{
int ret;
if (ret) {
if (ret != -EINVAL)
_mesa_warning(NULL, "drm_i915_getparam: %d", ret);
- return GL_FALSE;
+ return false;
}
- return GL_TRUE;
+ return true;
}
-static GLboolean
+static bool
intel_get_boolean(__DRIscreen *psp, int param)
{
int value = 0;
struct intel_screen *screen = (struct intel_screen*) driScrnPriv->private;
if (isPixmap) {
- return GL_FALSE; /* not implemented */
+ return false; /* not implemented */
}
else {
gl_format rgbFormat;
struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
if (!fb)
- return GL_FALSE;
+ return false;
_mesa_initialize_window_framebuffer(fb, mesaVis);
/* now add any/all software-based renderbuffers we may need */
_mesa_add_soft_renderbuffers(fb,
- GL_FALSE, /* never sw color */
- GL_FALSE, /* never sw depth */
- GL_FALSE, /* never sw stencil */
+ false, /* never sw color */
+ false, /* never sw depth */
+ false, /* never sw stencil */
mesaVis->accumRedBits > 0,
- GL_FALSE, /* never sw alpha */
- GL_FALSE /* never sw aux */ );
+ false, /* never sw alpha */
+ false /* never sw aux */ );
driDrawPriv->driverPrivate = fb;
- return GL_TRUE;
+ return true;
}
}
* init-designated function to register chipids and createcontext
* functions.
*/
-extern GLboolean i830CreateContext(const struct gl_config * mesaVis,
- __DRIcontext * driContextPriv,
- void *sharedContextPrivate);
-
-extern GLboolean i915CreateContext(int api,
- const struct gl_config * mesaVis,
- __DRIcontext * driContextPriv,
- void *sharedContextPrivate);
-extern GLboolean brwCreateContext(int api,
- const struct gl_config * mesaVis,
- __DRIcontext * driContextPriv,
- void *sharedContextPrivate);
+extern bool
+i830CreateContext(const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
+
+extern bool
+i915CreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
+extern bool
+brwCreateContext(int api,
+ const struct gl_config *mesaVis,
+ __DRIcontext *driContextPriv,
+ void *sharedContextPrivate);
static GLboolean
intelCreateContext(gl_api api,
sharedContextPrivate);
}
} else {
- intelScreen->no_vbo = GL_TRUE;
+ intelScreen->no_vbo = true;
return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
}
#else
driContextPriv, sharedContextPrivate);
#endif
fprintf(stderr, "Unrecognized deviceID 0x%x\n", intelScreen->deviceID);
- return GL_FALSE;
+ return false;
}
-static GLboolean
+static bool
intel_init_bufmgr(struct intel_screen *intelScreen)
{
__DRIscreen *spriv = intelScreen->driScrnPriv;
if (intelScreen->bufmgr == NULL) {
fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
__func__, __LINE__);
- return GL_FALSE;
+ return false;
}
if (!intel_get_param(spriv, I915_PARAM_NUM_FENCES_AVAIL, &num_fences) ||
num_fences == 0) {
fprintf(stderr, "[%s: %u] Kernel 2.6.29 required.\n", __func__, __LINE__);
- return GL_FALSE;
+ return false;
}
drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
intelScreen->relaxed_relocations |=
intel_get_boolean(spriv, I915_PARAM_HAS_RELAXED_DELTA) << 0;
- return GL_TRUE;
+ return true;
}
/**
intelScreen = CALLOC(sizeof *intelScreen);
if (!intelScreen) {
fprintf(stderr, "\nERROR! Allocating private area failed\n");
- return GL_FALSE;
+ return false;
}
/* parse information in __driConfigOptions */
driParseOptionInfo(&intelScreen->optionCache,
/* Determine chipset ID */
if (!intel_get_param(psp, I915_PARAM_CHIPSET_ID,
&intelScreen->deviceID))
- return GL_FALSE;
+ return false;
/* Allow an override of the device ID for the purpose of making the
* driver produce dumps for debugging of new chipset enablement.
psp->api_mask = api_mask;
if (!intel_init_bufmgr(intelScreen))
- return GL_FALSE;
+ return false;
psp->extensions = intelScreenExtensions;
ARRAY_SIZE(back_buffer_modes),
msaa_samples_array,
ARRAY_SIZE(msaa_samples_array),
- GL_FALSE);
+ false);
if (configs == NULL)
configs = new_configs;
else
depth_bits, stencil_bits, 1,
back_buffer_modes + 1, 1,
msaa_samples_array, 1,
- GL_TRUE);
+ true);
if (configs == NULL)
configs = new_configs;
else
tiling = I915_TILING_X;
intelBuffer->region = intel_region_alloc(intelScreen, tiling,
- format / 8, width, height, GL_TRUE);
+ format / 8, width, height, true);
if (intelBuffer->region == NULL) {
FREE(intelBuffer);
#ifndef _INTEL_INIT_H_
#define _INTEL_INIT_H_
+#include <stdbool.h>
#include <sys/time.h>
#include "dri_util.h"
#include "intel_bufmgr.h"
__DRIscreen *driScrnPriv;
- GLboolean no_hw;
+ bool no_hw;
GLuint relaxed_relocations;
/*
* rather than solely in intel_context, because glXCreatePbuffer and
* glXCreatePixmap are not passed a GLXContext.
*/
- GLboolean hw_has_separate_stencil;
- GLboolean hw_must_use_separate_stencil;
- GLboolean hw_has_hiz;
+ bool hw_has_separate_stencil;
+ bool hw_must_use_separate_stencil;
+ bool hw_has_hiz;
enum intel_dri2_has_hiz dri2_has_hiz;
- GLboolean no_vbo;
+ bool no_vbo;
dri_bufmgr *bufmgr;
struct _mesa_HashTable *named_regions;
driOptionCache optionCache;
};
-extern GLboolean intelMapScreenRegions(__DRIscreen * sPriv);
+extern bool intelMapScreenRegions(__DRIscreen * sPriv);
extern void intelDestroyContext(__DRIcontext * driContextPriv);
intel_miptree_create_for_teximage(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
- GLboolean expect_accelerated_upload);
+ bool expect_accelerated_upload);
GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
int intel_compressed_num_bytes(GLuint mesaFormat);
-GLboolean intel_copy_texsubimage(struct intel_context *intel,
+bool intel_copy_texsubimage(struct intel_context *intel,
struct intel_texture_image *intelImage,
GLint dstx, GLint dsty,
GLint x, GLint y,
}
-GLboolean
+bool
intel_copy_texsubimage(struct intel_context *intel,
struct intel_texture_image *intelImage,
GLint dstx, GLint dsty,
if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS))
fprintf(stderr, "%s fail %p %p (0x%08x)\n",
__FUNCTION__, intelImage->mt, irb, internalFormat);
- return GL_FALSE;
+ return false;
}
copy_supported = intelImage->base.Base.TexFormat == irb->Base.Format;
__FUNCTION__,
_mesa_get_format_name(intelImage->base.Base.TexFormat),
_mesa_get_format_name(irb->Base.Format));
- return GL_FALSE;
+ return false;
}
{
/* The blitter can't handle Y-tiled buffers. */
if (intelImage->mt->region->tiling == I915_TILING_Y) {
- return GL_FALSE;
+ return false;
}
if (ctx->ReadBuffer->Name == 0) {
image_x + dstx, image_y + dsty,
width, height,
GL_COPY)) {
- return GL_FALSE;
+ return false;
}
}
if (copy_supported_with_alpha_override)
intel_set_teximage_alpha_to_one(ctx, intelImage);
- return GL_TRUE;
+ return true;
}
intel_miptree_create_for_teximage(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage,
- GLboolean expect_accelerated_upload)
+ bool expect_accelerated_upload)
{
GLuint firstLevel;
GLuint lastLevel;
if (!intelEmitCopyBlit(intel,
intelImage->mt->cpp,
src_stride, src_buffer,
- src_offset, GL_FALSE,
+ src_offset, false,
dst_stride, dst_buffer, 0,
intelImage->mt->region->tiling,
0, 0, dst_x, dst_y, width, height,
* Else there is no image data.
*/
struct intel_mipmap_tree *mt;
- GLboolean used_as_render_target;
+ bool used_as_render_target;
/**
* \name Renderbuffers for faking packed depth/stencil
_mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
}
- GLboolean ret;
+ bool ret;
unsigned int dst_pitch = intelImage->mt->region->pitch *
intelImage->mt->cpp;
ret = intelEmitCopyBlit(intel,
intelImage->mt->cpp,
dstRowStride / intelImage->mt->cpp,
- temp_bo, 0, GL_FALSE,
+ temp_bo, 0, false,
dst_pitch / intelImage->mt->cpp,
intelImage->mt->region->bo, 0,
intelImage->mt->region->tiling,
*/
if (firstImage->base.Base.Border) {
intel_miptree_release(&intelObj->mt);
- return GL_FALSE;
+ return false;
}
intel_miptree_get_dimensions_for_image(&firstImage->base.Base,
width,
height,
depth,
- GL_TRUE);
+ true);
if (!intelObj->mt)
- return GL_FALSE;
+ return false;
}
/* Pull in any images not in the object's tree:
}
}
- return GL_TRUE;
+ return true;
}
static void