/* Flush any existing batchbuffer that might reference this data. */
if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
- intelFlush(ctx);
+ intel_flush(ctx);
if (intel_obj->region)
intel_bufferobj_cow(intel, intel_obj);
*/
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
- intelFlush(ctx);
+ intel_flush(ctx);
if (intel_obj->buffer == NULL) {
obj->Pointer = NULL;
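Both hunks above apply the same rule: before handing a buffer object's storage to the CPU, flush any batchbuffer that still references it, so queued GPU writes land first. Below is a minimal standalone sketch of that check-then-flush pattern; fake_bo, fake_batch and the helper names are illustrative stand-ins, not the driver's actual API.

#include <stdbool.h>
#include <stdio.h>

struct fake_bo { int id; };

struct fake_batch {
   struct fake_bo *referenced;   /* BO the pending batch writes to, if any */
};

/* Stands in for drm_intel_bo_references(): does the batch touch this BO? */
static bool batch_references(const struct fake_batch *batch,
                             const struct fake_bo *bo)
{
   return batch->referenced == bo;
}

/* Stands in for intel_flush(): submit the pending batch to the kernel. */
static void flush_batch(struct fake_batch *batch)
{
   printf("flushing batch that references bo %d\n", batch->referenced->id);
   batch->referenced = NULL;
}

/* Map a BO for CPU access; flush first so GPU writes land before we read. */
static void map_bo(struct fake_batch *batch, struct fake_bo *bo)
{
   if (batch_references(batch, bo))
      flush_batch(batch);
   printf("mapping bo %d\n", bo->id);
}

int main(void)
{
   struct fake_bo bo = { .id = 42 };
   struct fake_batch batch = { .referenced = &bo };
   map_bo(&batch, &bo);   /* triggers a flush, then maps */
   return 0;
}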
* buffer.
*/
if (intel->is_front_buffer_rendering) {
- intel_flush(&intel->ctx, GL_FALSE);
+ intel_flush(&intel->ctx);
intel_flush_front(&intel->ctx);
}
}
void
-intel_flush(GLcontext *ctx, GLboolean needs_mi_flush)
+intel_flush(GLcontext *ctx)
{
struct intel_context *intel = intel_context(ctx);
intel_batchbuffer_flush(intel->batch);
}
-void
-intelFlush(GLcontext * ctx)
-{
- intel_flush(ctx, GL_FALSE);
-}
-
static void
intel_glFlush(GLcontext *ctx)
{
struct intel_context *intel = intel_context(ctx);
- intel_flush(ctx, GL_TRUE);
+ intel_flush(ctx);
intel_flush_front(ctx);
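The net effect of the hunks above: the intelFlush() wrapper and the needs_mi_flush flag are gone, leaving a single flush entry point, and paths that must handle front-buffer rendering (like intel_glFlush() here) call intel_flush_front() explicitly. A compilable sketch of that caller-side split, with GLcontext and both flush functions stubbed so the example stands alone:

#include <stdio.h>

typedef struct GLcontext GLcontext;
struct GLcontext { int dummy; };

/* Stubs for the two real entry points, for illustration only. */
static void intel_flush(GLcontext *ctx)       { (void)ctx; puts("batch flushed"); }
static void intel_flush_front(GLcontext *ctx) { (void)ctx; puts("front buffer flushed"); }

/* Before this patch, intel_flush(ctx, GL_TRUE) folded both concerns into
 * one call via the needs_mi_flush flag; after it, they are separate calls. */
static void glFlush_sketch(GLcontext *ctx)
{
   intel_flush(ctx);        /* submit the batchbuffer */
   intel_flush_front(ctx);  /* then deal with front-buffer rendering */
}

int main(void)
{
   GLcontext ctx = {0};
   glFlush_sketch(&ctx);
   return 0;
}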
struct gl_framebuffer *fb = ctx->DrawBuffer;
int i;
- intelFlush(ctx);
+ intel_flush(ctx);
for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
struct intel_renderbuffer *irb;
struct dd_function_table *functions);
extern void intelFinish(GLcontext * ctx);
-extern void intelFlush(GLcontext * ctx);
-extern void intel_flush(GLcontext * ctx, GLboolean needs_mi_flush);
+extern void intel_flush(GLcontext * ctx);
extern void intelInitDriverFunctions(struct dd_function_table *functions);
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
cpp = _mesa_get_format_bytes(rb->Format);
- intelFlush(ctx);
+ intel_flush(ctx);
/* free old region */
if (irb->region) {
{
DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
- intelFlush(ctx);
+ intel_flush(ctx);
_mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
intel_draw_buffer(ctx, fb);
if (!src || !dst)
return GL_FALSE;
- intelFlush(&intel->ctx);
+ intel_flush(&intel->ctx);
/* Clip to destination buffer. */
orig_dstx = dstx;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
- intelFlush(ctx);
+ intel_flush(ctx);
/* glReadPixels() won't dirty the front buffer, so reset the dirty
* flag after calling intel_prepare_render(). */
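The comment above records a subtle invariant: preparing to render pessimistically marks the front buffer dirty, and a pure read path such as glReadPixels() has to clear that mark again. A minimal model of the idea follows; struct context and the front_buffer_dirty field are assumed names for illustration, not necessarily the driver's real ones.

#include <stdbool.h>
#include <stdio.h>

struct context {
   bool front_buffer_dirty;   /* assumed field name, for illustration */
};

/* Stands in for intel_prepare_render(): pessimistically marks the front
 * buffer dirty because most callers are about to draw. */
static void prepare_render(struct context *ctx)
{
   ctx->front_buffer_dirty = true;
}

/* A read-only path never writes the front buffer, so it clears the flag
 * again right after preparing. */
static void read_pixels(struct context *ctx)
{
   prepare_render(ctx);
   ctx->front_buffer_dirty = false;   /* reads don't dirty the front buffer */
   puts("reading pixels");
}

int main(void)
{
   struct context ctx = { false };
   read_pixels(&ctx);
   printf("dirty after read: %d\n", ctx.front_buffer_dirty);
   return 0;
}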
GLubyte *
intel_region_map(struct intel_context *intel, struct intel_region *region)
{
- intelFlush(&intel->ctx);
+ intel_flush(&intel->ctx);
_DBG("%s %p\n", __FUNCTION__, region);
if (!region->map_refcount++) {
struct intel_context *intel = intel_context(ctx);
GLuint i;
- intelFlush(&intel->ctx);
+ intel_flush(&intel->ctx);
intel_prepare_render(intel);
for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
sync->bo = intel->batch->buf;
drm_intel_bo_reference(sync->bo);
- intelFlush(ctx);
+ intel_flush(ctx);
}
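The fence hunk above pairs three steps: point the sync object at the current batch's buffer, take a reference so that buffer outlives the flush, and flush so the kernel actually queues the work the fence covers. A standalone sketch of that ordering, with a plain counter standing in for drm_intel_bo refcounting:

#include <stdio.h>

struct bo {
   int refcount;
};

struct batch {
   struct bo *buf;
};

struct sync_object {
   struct bo *bo;   /* waiting on this BO waits for everything before the fence */
};

static void bo_reference(struct bo *bo) { bo->refcount++; }

/* Stands in for intel_flush(): submit the batch so the kernel sees it. */
static void flush_batch(struct batch *batch)
{
   (void)batch;
   puts("batch submitted; fence BO now tracks its completion");
}

static void fence_sync(struct sync_object *sync, struct batch *batch)
{
   sync->bo = batch->buf;   /* remember the batch's buffer... */
   bo_reference(sync->bo);  /* ...keep it alive past the flush... */
   flush_batch(batch);      /* ...and submit so the work is actually queued */
}

int main(void)
{
   struct bo buf = { .refcount = 1 };
   struct batch batch = { .buf = &buf };
   struct sync_object sync;
   fence_sync(&sync, &batch);
   printf("bo refcount: %d\n", buf.refcount);
   return 0;
}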
/* We ignore the user-supplied timeout. This is weaselly -- we're allowed to
return GL_FALSE;
}
- /* intelFlush(ctx); */
+ /* intel_flush(ctx); */
intel_prepare_render(intel);
{
drm_intel_bo *dst_bo = intel_region_buffer(intel,
dst_stride = intelImage->mt->region->pitch;
if (drm_intel_bo_references(intel->batch->buf, dst_buffer))
- intelFlush(&intel->ctx);
+ intel_flush(&intel->ctx);
intel_prepare_render(intel);
{
dri_bo *src_buffer = intel_bufferobj_buffer(intel, pbo, INTEL_READ);
/* Flush any queued rendering with the texture before mapping. */
if (drm_intel_bo_references(intel->batch->buf,
intelImage->mt->region->buffer)) {
- intelFlush(ctx);
+ intel_flush(ctx);
}
texImage->Data = intel_miptree_image_map(intel,
intelImage->mt,
* make sure rendering is complete.
* We could probably predicate this on texObj->_RenderToTexture
*/
- intelFlush(ctx);
+ intel_flush(ctx);
/* Map */
if (intelImage->mt) {
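The comment above flags a possible optimization: the code flushes unconditionally before mapping, but in principle only a texture that has been rendered to needs it. A sketch of the predicated variant the comment hints at; struct texture and its render_to_texture field are assumptions standing in for texObj->_RenderToTexture.

#include <stdbool.h>
#include <stdio.h>

struct texture {
   bool render_to_texture;   /* assumed stand-in for texObj->_RenderToTexture */
};

/* Flush only when the texture may have pending GPU writes, instead of
 * always; this is the predication the comment suggests. */
static void flush_if_needed(const struct texture *tex)
{
   if (tex->render_to_texture)
      puts("flush: texture was a render target");
   else
      puts("no flush needed");
}

int main(void)
{
   struct texture plain = { false }, target = { true };
   flush_if_needed(&plain);
   flush_if_needed(&target);
   return 0;
}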
_mesa_lookup_enum_by_nr(target),
level, xoffset, yoffset, width, height);
- intelFlush(ctx);
+ intel_flush(ctx);
if (compressed)
pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize,