radeon_context now contains a gl_context, rather than a pointer to one.
This will allow some minor core Mesa clean-up.
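In short, the driver struct now embeds the GL context as its first member, so the
context is initialized in place instead of being allocated separately. A simplified
sketch of the pattern (not the complete driver code; glVisual, shareCtx and functions
are the usual context-creation parameters from the surrounding code):

    struct radeon_context {
       struct gl_context glCtx;   /* base class, must be first */
       /* ... driver-private fields ... */
    };

    /* old: radeon->glCtx = _mesa_create_context(API_OPENGL, glVisual,
     *                                           shareCtx, functions,
     *                                           (void *) radeon);
     */
    if (!_mesa_initialize_context(&radeon->glCtx, API_OPENGL,
                                  glVisual, shareCtx,
                                  functions, (void *) radeon))
       return GL_FALSE;

Call sites change accordingly: radeon->glCtx->Field becomes radeon->glCtx.Field,
places that need a pointer use &radeon->glCtx, and teardown calls
_mesa_free_context_data() instead of _mesa_destroy_context(), since the context
memory is now owned by radeon_context itself.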
}
/* Flush is needed to make sure that source buffer has correct data */
- radeonFlush(r200->radeon.glCtx);
+ radeonFlush(&r200->radeon.glCtx);
rcommonEnsureCmdBufSpace(&r200->radeon, 102, __FUNCTION__);
{
int i, mtu;
- mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;
+ mtu = rmesa->radeon.glCtx.Const.MaxTextureUnits;
make_empty_list(&rmesa->radeon.hw.atomlist);
rmesa->radeon.hw.atomlist.name = "atom-list";
retval = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
assert(!rmesa->radeon.dma.flush);
- rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->radeon.dma.flush = r200FlushElts;
return retval;
* setting allow larger textures.
*/
- ctx = rmesa->radeon.glCtx;
+ ctx = &rmesa->radeon.glCtx;
ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
"texture_units");
ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
others get the bit ordering right but don't actually do YUV-RGB conversion */
ctx->Extensions.MESA_ycbcr_texture = true;
}
- if (rmesa->radeon.glCtx->Mesa_DXTn) {
+ if (rmesa->radeon.glCtx.Mesa_DXTn) {
ctx->Extensions.EXT_texture_compression_s3tc = true;
ctx->Extensions.S3_s3tc = true;
}
rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
fprintf(stderr, "Disabling HW TCL support\n");
}
- TCL_FALLBACK(rmesa->radeon.glCtx, R200_TCL_FALLBACK_TCL_DISABLE, 1);
+ TCL_FALLBACK(&rmesa->radeon.glCtx, R200_TCL_FALLBACK_TCL_DISABLE, 1);
}
_mesa_compute_version(ctx);
#define R200_NEWPRIM( rmesa ) \
do { \
if ( rmesa->radeon.dma.flush ) \
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx ); \
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx ); \
} while (0)
/* Can accomodate several state changes and primitive changes without
#define FALLBACK( rmesa, bit, mode ) do { \
if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
__FUNCTION__, bit, mode ); \
- r200Fallback( rmesa->radeon.glCtx, bit, mode ); \
+ r200Fallback( &rmesa->radeon.glCtx, bit, mode ); \
} while (0)
extern void r200LightingSpaceChange( struct gl_context *ctx );
*/
void r200InitState( r200ContextPtr rmesa )
{
- struct gl_context *ctx = rmesa->radeon.glCtx;
+ struct gl_context *ctx = &rmesa->radeon.glCtx;
GLuint i;
rmesa->radeon.Fallback = 0;
}
else {
if (rmesa->radeon.dma.flush)
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
r200EmitAOS( rmesa,
rmesa->radeon.tcl.aos_count, 0 );
state_size = radeonCountStateEmitSize( &rmesa->radeon );
/* vtx may be changed in r200EmitArrays so account for it if not dirty */
if (!rmesa->hw.vtx.dirty)
- state_size += rmesa->hw.vtx.check(rmesa->radeon.glCtx, &rmesa->hw.vtx);
+ state_size += rmesa->hw.vtx.check(&rmesa->radeon.glCtx, &rmesa->hw.vtx);
/* predict size for elements */
for (i = 0; i < VB->PrimitiveCount; ++i)
{
tnl->Driver.NotifyMaterialChange = r200UpdateMaterial;
if ( rmesa->radeon.dma.flush )
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
rmesa->radeon.dma.flush = NULL;
if (oldfallback == 0) {
/* We have to flush before transition */
if ( rmesa->radeon.dma.flush )
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
if (R200_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "R200 begin tcl fallback %s\n",
if (oldfallback == bit) {
/* We have to flush before transition */
if ( rmesa->radeon.dma.flush )
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
if (R200_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "R200 end tcl fallback %s\n",
if (rmesa) {
int i;
radeon_firevertices(&rmesa->radeon);
- for ( i = 0 ; i < rmesa->radeon.glCtx->Const.MaxTextureUnits ; i++ ) {
+ for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
if ( t == rmesa->state.texture.unit[i].texobj ) {
rmesa->state.texture.unit[i].texobj = NULL;
rmesa->hw.tex[i].dirty = GL_FALSE;
radeon = pDRICtx->driverPrivate;
rfb = dPriv->driverPrivate;
- texUnit = &radeon->glCtx->Texture.Unit[radeon->glCtx->Texture.CurrentUnit];
- texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
- texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);
+ texUnit = &radeon->glCtx.Texture.Unit[radeon->glCtx.Texture.CurrentUnit];
+ texObj = _mesa_select_tex_object(&radeon->glCtx, texUnit, target);
+ texImage = _mesa_get_tex_image(&radeon->glCtx, texObj, target, 0);
rImage = get_radeon_texture_image(texImage);
t = radeon_tex_obj(texObj);
return;
}
- _mesa_lock_texture(radeon->glCtx, texObj);
+ _mesa_lock_texture(&radeon->glCtx, texObj);
if (t->bo) {
radeon_bo_unref(t->bo);
t->bo = NULL;
break;
}
- _mesa_init_teximage_fields(radeon->glCtx, texImage,
+ _mesa_init_teximage_fields(&radeon->glCtx, texImage,
rb->base.Base.Width, rb->base.Base.Height,
1, 0,
rb->cpp, texFormat);
}
t->validated = GL_TRUE;
- _mesa_unlock_texture(radeon->glCtx, texObj);
+ _mesa_unlock_texture(&radeon->glCtx, texObj);
return;
}
R200_STATECHANGE( rmesa, ctx );
rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~(R200_TEX_0_ENABLE << unit);
if (rmesa->radeon.TclFallback & (R200_TCL_FALLBACK_TEXGEN_0<<unit)) {
- TCL_FALLBACK( rmesa->radeon.glCtx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
+ TCL_FALLBACK( &rmesa->radeon.glCtx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
}
/* Actually want to keep all units less than max active texture
static INLINE void radeon_firevertices(radeonContextPtr radeon)
{
if (radeon->cmdbuf.cs->cdw || radeon->dma.flush )
- radeon->glCtx->Driver.Flush(radeon->glCtx); /* +r6/r7 */
+ radeon->glCtx.Driver.Flush(&radeon->glCtx); /* +r6/r7 */
}
#endif
if ((draw_rfb->base.Width != drawable->w) ||
(draw_rfb->base.Height != drawable->h)) {
- _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
+ _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
drawable->w, drawable->h);
draw_rfb->base.Initialized = GL_TRUE;
}
if (drawable != readable) {
if ((read_rfb->base.Width != readable->w) ||
(read_rfb->base.Height != readable->h)) {
- _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
+ _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
readable->w, readable->h);
read_rfb->base.Initialized = GL_TRUE;
}
}
if (radeon->state.scissor.enabled)
- radeonUpdateScissor(radeon->glCtx);
+ radeonUpdateScissor(&radeon->glCtx);
}
old_viewport = ctx->Driver.Viewport;
ctx->Driver.Viewport = NULL;
radeon_window_moved(radeon);
- radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
+ radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
ctx->Driver.Viewport = old_viewport;
}
if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
return;
- dwords = (*state->check) (radeon->glCtx, state);
+ dwords = (*state->check) (&radeon->glCtx, state);
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
goto out;
foreach(atom, &radeon->hw.atomlist) {
if (atom->dirty) {
- const GLuint atom_size = atom->check(radeon->glCtx, atom);
+ const GLuint atom_size = atom->check(&radeon->glCtx, atom);
dwords += atom_size;
if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
}
} else {
foreach(atom, &radeon->hw.atomlist) {
- const GLuint atom_size = atom->check(radeon->glCtx, atom);
+ const GLuint atom_size = atom->check(&radeon->glCtx, atom);
dwords += atom_size;
if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
BATCH_LOCALS(radeon);
int dwords;
- dwords = (*atom->check) (radeon->glCtx, atom);
+ dwords = (*atom->check) (&radeon->glCtx, atom);
if (dwords) {
radeon_print_state_atom(radeon, atom);
if (atom->emit) {
- (*atom->emit)(radeon->glCtx, atom);
+ (*atom->emit)(&radeon->glCtx, atom);
} else {
BEGIN_BATCH_NO_AUTOSTATE(dwords);
OUT_BATCH_TABLE(atom->cmd, dwords);
fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
}
- radeonEmitQueryEnd(rmesa->glCtx);
+ radeonEmitQueryEnd(&rmesa->glCtx);
if (rmesa->cmdbuf.cs->cdw) {
ret = radeon_cs_emit(rmesa->cmdbuf.cs);
radeon_cs_erase(rmesa->cmdbuf.cs);
rmesa->cmdbuf.flushing = 0;
- if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
+ if (radeon_revalidate_bos(&rmesa->glCtx) == GL_FALSE) {
fprintf(stderr,"failed to revalidate buffers\n");
}
rmesa->cmdbuf.size = size;
radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
- (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
+ (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);
if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
radeon->radeonScreen = screen;
/* Allocate and initialize the Mesa context */
if (sharedContextPrivate)
- shareCtx = ((radeonContextPtr)sharedContextPrivate)->glCtx;
+ shareCtx = &((radeonContextPtr)sharedContextPrivate)->glCtx;
else
shareCtx = NULL;
- radeon->glCtx = _mesa_create_context(API_OPENGL, glVisual, shareCtx,
- functions, (void *)radeon);
- if (!radeon->glCtx)
+
+ if (!_mesa_initialize_context(&radeon->glCtx, API_OPENGL,
+ glVisual, shareCtx,
+ functions, (void *)radeon))
return GL_FALSE;
- ctx = radeon->glCtx;
+ ctx = &radeon->glCtx;
driContextPriv->driverPrivate = radeon;
_mesa_meta_init(ctx);
assert(radeon);
- _mesa_meta_free(radeon->glCtx);
+ _mesa_meta_free(&radeon->glCtx);
if (radeon == current) {
_mesa_make_current(NULL, NULL, NULL);
}
radeonFreeDmaRegions(radeon);
- radeonReleaseArrays(radeon->glCtx, ~0);
+ radeonReleaseArrays(&radeon->glCtx, ~0);
if (radeon->vtbl.free_context)
- radeon->vtbl.free_context(radeon->glCtx);
- _swsetup_DestroyContext( radeon->glCtx );
- _tnl_DestroyContext( radeon->glCtx );
- _vbo_DestroyContext( radeon->glCtx );
- _swrast_DestroyContext( radeon->glCtx );
+ radeon->vtbl.free_context(&radeon->glCtx);
+ _swsetup_DestroyContext( &radeon->glCtx );
+ _tnl_DestroyContext( &radeon->glCtx );
+ _vbo_DestroyContext( &radeon->glCtx );
+ _swrast_DestroyContext( &radeon->glCtx );
/* free atom list */
- /* free the Mesa context */
- _mesa_destroy_context(radeon->glCtx);
+ /* free the Mesa context data */
+ _mesa_free_context_data(&radeon->glCtx);
- /* _mesa_destroy_context() might result in calls to functions that
+ /* _mesa_free_context_data() might result in calls to functions that
* depend on the DriverCtx, so don't set it to NULL before.
if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
- radeon->glCtx);
+ &radeon->glCtx);
/* Unset current context and dispath table */
_mesa_make_current(NULL, NULL, NULL);
radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
/* Intel driver does the equivalent of this, no clue if it is needed:*/
- radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
+ radeon_draw_buffer(&radeon->glCtx, radeon->glCtx.DrawBuffer);
driContext->dri2.draw_stamp = drawable->dri2.stamp;
}
}
}
- driUpdateFramebufferSize(radeon->glCtx, drawable);
+ driUpdateFramebufferSize(&radeon->glCtx, drawable);
}
/* Force the context `c' to be the current context and associate with it
}
if(driDrawPriv == NULL && driReadPriv == NULL) {
- drfb = _mesa_create_framebuffer(&radeon->glCtx->Visual);
+ drfb = _mesa_create_framebuffer(&radeon->glCtx.Visual);
readfb = drfb;
}
else {
&(radeon_get_renderbuffer(drfb, BUFFER_DEPTH)->base.Base));
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, radeon->glCtx, drfb, readfb);
+ fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, &radeon->glCtx, drfb, readfb);
if(driDrawPriv)
- driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
+ driUpdateFramebufferSize(&radeon->glCtx, driDrawPriv);
if (driReadPriv != driDrawPriv)
- driUpdateFramebufferSize(radeon->glCtx, driReadPriv);
+ driUpdateFramebufferSize(&radeon->glCtx, driReadPriv);
- _mesa_make_current(radeon->glCtx, drfb, readfb);
+ _mesa_make_current(&radeon->glCtx, drfb, readfb);
if (driDrawPriv == NULL && driReadPriv == NULL)
_mesa_reference_framebuffer(&drfb, NULL);
- _mesa_update_state(radeon->glCtx);
+ _mesa_update_state(&radeon->glCtx);
- if (radeon->glCtx->DrawBuffer == drfb) {
+ if (radeon->glCtx.DrawBuffer == drfb) {
if(driDrawPriv != NULL) {
radeon_window_moved(radeon);
}
- radeon_draw_buffer(radeon->glCtx, drfb);
+ radeon_draw_buffer(&radeon->glCtx, drfb);
}
};
struct radeon_context {
- struct gl_context *glCtx;
+ struct gl_context glCtx; /**< base class, must be first */
radeonScreenPtr radeonScreen; /* Screen private DRI data */
/* Texture object bookkeeping
* setting allow larger textures.
*/
- ctx = rmesa->radeon.glCtx;
+ ctx = &rmesa->radeon.glCtx;
ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
"texture_units");
ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
ctx->Extensions.EXT_framebuffer_object = true;
ctx->Extensions.ARB_texture_cube_map = true;
- if (rmesa->radeon.glCtx->Mesa_DXTn) {
+ if (rmesa->radeon.glCtx.Mesa_DXTn) {
ctx->Extensions.EXT_texture_compression_s3tc = true;
ctx->Extensions.S3_s3tc = true;
}
rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
fprintf(stderr, "Disabling HW TCL support\n");
}
- TCL_FALLBACK(rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
+ TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
}
if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
if (rmesa->dma.flush)
- rmesa->dma.flush(rmesa->glCtx);
+ rmesa->dma.flush(&rmesa->glCtx);
assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
if (rmesa->dma.flush) {
- rmesa->dma.flush(rmesa->glCtx);
+ rmesa->dma.flush(&rmesa->glCtx);
}
radeonRefillCurrentDmaRegion(rmesa, bytes);
if (!rmesa->dma.flush) {
/* if cmdbuf flushed DMA restart */
- rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
}
fprintf(stderr, "%s\n", __FUNCTION__);
if (radeon->dma.flush) {
- radeon->dma.flush(radeon->glCtx);
+ radeon->dma.flush(&radeon->glCtx);
}
for (i = 0; i < radeon->tcl.aos_count; i++) {
if (radeon->tcl.aos[i].bo) {
rb->Width = image->width;
rb->Height = image->height;
rb->Format = image->format;
- rb->_BaseFormat = _mesa_base_fbo_format(radeon->glCtx,
+ rb->_BaseFormat = _mesa_base_fbo_format(&radeon->glCtx,
image->internal_format);
}
void radeon_fbo_init(struct radeon_context *radeon)
{
- radeon->glCtx->Driver.NewFramebuffer = radeon_new_framebuffer;
- radeon->glCtx->Driver.NewRenderbuffer = radeon_new_renderbuffer;
- radeon->glCtx->Driver.MapRenderbuffer = radeon_map_renderbuffer;
- radeon->glCtx->Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
- radeon->glCtx->Driver.BindFramebuffer = radeon_bind_framebuffer;
- radeon->glCtx->Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
- radeon->glCtx->Driver.RenderTexture = radeon_render_texture;
- radeon->glCtx->Driver.FinishRenderTexture = radeon_finish_render_texture;
- radeon->glCtx->Driver.ResizeBuffers = radeon_resize_buffers;
- radeon->glCtx->Driver.ValidateFramebuffer = radeon_validate_framebuffer;
- radeon->glCtx->Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
- radeon->glCtx->Driver.EGLImageTargetRenderbufferStorage =
+ radeon->glCtx.Driver.NewFramebuffer = radeon_new_framebuffer;
+ radeon->glCtx.Driver.NewRenderbuffer = radeon_new_renderbuffer;
+ radeon->glCtx.Driver.MapRenderbuffer = radeon_map_renderbuffer;
+ radeon->glCtx.Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
+ radeon->glCtx.Driver.BindFramebuffer = radeon_bind_framebuffer;
+ radeon->glCtx.Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
+ radeon->glCtx.Driver.RenderTexture = radeon_render_texture;
+ radeon->glCtx.Driver.FinishRenderTexture = radeon_finish_render_texture;
+ radeon->glCtx.Driver.ResizeBuffers = radeon_resize_buffers;
+ radeon->glCtx.Driver.ValidateFramebuffer = radeon_validate_framebuffer;
+ radeon->glCtx.Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
+ radeon->glCtx.Driver.EGLImageTargetRenderbufferStorage =
radeon_image_target_renderbuffer_storage;
}
*/
void radeonSetUpAtomList( r100ContextPtr rmesa )
{
- int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;
+ int i, mtu = rmesa->radeon.glCtx.Const.MaxTextureUnits;
make_empty_list(&rmesa->radeon.hw.atomlist);
rmesa->radeon.hw.atomlist.name = "atom-list";
if (RADEON_DEBUG & RADEON_SYNC) {
fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
- radeonFinish( rmesa->radeon.glCtx );
+ radeonFinish( &rmesa->radeon.glCtx );
}
}
__FUNCTION__, primitive);
assert(!rmesa->radeon.dma.flush);
- rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
rmesa->radeon.dma.flush = radeonFlushElts;
return retval;
#define RADEON_NEWPRIM( rmesa ) \
do { \
if ( rmesa->radeon.dma.flush ) \
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx ); \
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx ); \
} while (0)
/* Can accomodate several state changes and primitive changes without
{
GLuint curOffset, i, face, level;
- assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
+ assert(mt->numLevels <= rmesa->glCtx.Const.MaxTextureLevels);
curOffset = 0;
for(face = 0; face < mt->faces; face++) {
assert(radeon->query.current == NULL);
if (radeon->dma.flush)
- radeon->dma.flush(radeon->glCtx);
+ radeon->dma.flush(&radeon->glCtx);
if (!query->bo) {
query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
if (radeon->dma.flush)
- radeon->dma.flush(radeon->glCtx);
+ radeon->dma.flush(&radeon->glCtx);
radeonEmitQueryEnd(ctx);
radeon->query.current = NULL;
radeonContextPtr rmesa;
rmesa = (radeonContextPtr) drawable->driContextPriv->driverPrivate;
- radeonFlush(rmesa->glCtx);
+ radeonFlush(&rmesa->glCtx);
}
static const struct __DRI2flushExtensionRec radeonFlushExtension = {
struct gl_renderbuffer *rb;
struct radeon_renderbuffer *rrb;
- rb = _mesa_lookup_renderbuffer(radeon->glCtx, renderbuffer);
+ rb = _mesa_lookup_renderbuffer(&radeon->glCtx, renderbuffer);
if (!rb) {
- _mesa_error(radeon->glCtx,
+ _mesa_error(&radeon->glCtx,
GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
return NULL;
}
int idx = TEXMAT_0 + unit;
float *dest = ((float *)RADEON_DB_STATE( mat[idx] )) + MAT_ELT_0;
int i;
- struct gl_texture_unit tUnit = rmesa->radeon.glCtx->Texture.Unit[unit];
+ struct gl_texture_unit tUnit = rmesa->radeon.glCtx.Texture.Unit[unit];
GLfloat *src = rmesa->tmpmat[unit].m;
rmesa->TexMatColSwap &= ~(1 << unit);
#define FALLBACK( rmesa, bit, mode ) do { \
if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
__FUNCTION__, bit, mode ); \
- radeonFallback( rmesa->radeon.glCtx, bit, mode ); \
+ radeonFallback( &rmesa->radeon.glCtx, bit, mode ); \
} while (0)
*/
void radeonInitState( r100ContextPtr rmesa )
{
- struct gl_context *ctx = rmesa->radeon.glCtx;
+ struct gl_context *ctx = &rmesa->radeon.glCtx;
GLuint i;
rmesa->radeon.Fallback = 0;
static GLushort *radeonAllocElts( r100ContextPtr rmesa, GLuint nr )
{
if (rmesa->radeon.dma.flush)
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
radeonEmitAOS( rmesa,
rmesa->radeon.tcl.aos_count, 0 );
state_size = radeonCountStateEmitSize( &rmesa->radeon );
/* tcl may be changed in radeonEmitArrays so account for it if not dirty */
if (!rmesa->hw.tcl.dirty)
- state_size += rmesa->hw.tcl.check( rmesa->radeon.glCtx, &rmesa->hw.tcl );
+ state_size += rmesa->hw.tcl.check( &rmesa->radeon.glCtx, &rmesa->hw.tcl );
/* predict size for elements */
for (i = 0; i < VB->PrimitiveCount; ++i)
{
tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;
if ( rmesa->radeon.dma.flush )
- rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+ rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
rmesa->radeon.dma.flush = NULL;
rmesa->swtcl.vertex_format = 0;
if ( rmesa ) {
radeon_firevertices(&rmesa->radeon);
- for ( i = 0 ; i < rmesa->radeon.glCtx->Const.MaxTextureUnits ; i++ ) {
+ for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
if ( t == rmesa->state.texture.unit[i].texobj ) {
rmesa->state.texture.unit[i].texobj = NULL;
rmesa->hw.tex[i].dirty = GL_FALSE;
radeon = pDRICtx->driverPrivate;
rfb = dPriv->driverPrivate;
- texUnit = _mesa_get_current_tex_unit(radeon->glCtx);
- texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
- texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);
+ texUnit = _mesa_get_current_tex_unit(&radeon->glCtx);
+ texObj = _mesa_select_tex_object(&radeon->glCtx, texUnit, target);
+ texImage = _mesa_get_tex_image(&radeon->glCtx, texObj, target, 0);
rImage = get_radeon_texture_image(texImage);
t = radeon_tex_obj(texObj);
return;
}
- _mesa_lock_texture(radeon->glCtx, texObj);
+ _mesa_lock_texture(&radeon->glCtx, texObj);
if (t->bo) {
radeon_bo_unref(t->bo);
t->bo = NULL;
break;
}
- _mesa_init_teximage_fields(radeon->glCtx, texImage,
+ _mesa_init_teximage_fields(&radeon->glCtx, texImage,
rb->base.Base.Width, rb->base.Base.Height,
1, 0,
rb->cpp, texFormat);
(texImage->HeightLog2 << RADEON_TXFORMAT_HEIGHT_SHIFT));
}
t->validated = GL_TRUE;
- _mesa_unlock_texture(radeon->glCtx, texObj);
+ _mesa_unlock_texture(&radeon->glCtx, texObj);
return;
}
RADEON_Q_BIT(unit));
if (rmesa->radeon.TclFallback & (RADEON_TCL_FALLBACK_TEXGEN_0<<unit)) {
- TCL_FALLBACK( rmesa->radeon.glCtx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
+ TCL_FALLBACK( &rmesa->radeon.glCtx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
rmesa->recheck_texgen[unit] = GL_TRUE;
}