fix fd.o bug #12217, recalculate urb when clip plane size changes
[mesa.git] / src / mesa / drivers / dri / i915 / intel_ioctl.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include <stdio.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <sched.h>
33
34 #include "mtypes.h"
35 #include "context.h"
36 #include "swrast/swrast.h"
37
38 #include "intel_context.h"
39 #include "intel_ioctl.h"
40 #include "intel_batchbuffer.h"
41 #include "drm.h"
42
43 u_int32_t intelGetLastFrame (intelContextPtr intel)
44 {
45 int ret;
46 u_int32_t frame;
47 drm_i915_getparam_t gp;
48
49 gp.param = I915_PARAM_LAST_DISPATCH;
50 gp.value = (int *)&frame;
51 ret = drmCommandWriteRead( intel->driFd, DRM_I915_GETPARAM,
52 &gp, sizeof(gp) );
53 return frame;
54 }
55
56 /**
57 * Emits a marker in the command stream, numbered from 0x00000001 to
58 * 0x7fffffff.
59 */
60 int intelEmitIrqLocked( intelContextPtr intel )
61 {
62 drmI830IrqEmit ie;
63 int ret, seq;
64
65 assert(((*(int *)intel->driHwLock) & ~DRM_LOCK_CONT) ==
66 (DRM_LOCK_HELD|intel->hHWContext));
67
68 /* Valgrind can't tell that the kernel will have copyout()ed onto this
69 * value, so initialize it now to prevent false positives.
70 */
71 seq = 0;
72 ie.irq_seq = &seq;
73
74 ret = drmCommandWriteRead( intel->driFd, DRM_I830_IRQ_EMIT,
75 &ie, sizeof(ie) );
76 if ( ret ) {
77 fprintf( stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret );
78 exit(1);
79 }
80
81 if (0)
82 fprintf(stderr, "%s --> %d\n", __FUNCTION__, seq );
83
84 return seq;
85 }
86
/** Blocks on a marker returned by intelEmitIrqLocked(). */
void intelWaitIrq( intelContextPtr intel, int seq )
{
   int ret;

   if (0)
      fprintf(stderr, "%s %d\n", __FUNCTION__, seq );

   intel->iw.irq_seq = seq;

   /* Retry when the wait ioctl is interrupted by a signal or the kernel
    * asks us to try again.
    */
   do {
      ret = drmCommandWrite( intel->driFd, DRM_I830_IRQ_WAIT, &intel->iw, sizeof(intel->iw) );
   } while (ret == -EAGAIN || ret == -EINTR);

   if ( ret ) {
      fprintf( stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret );
      /* Debug aid: dump the whole batch allocation before bailing out. */
      if (0)
	 intel_dump_batchbuffer( intel->alloc.offset,
				 intel->alloc.ptr,
				 intel->alloc.size );
      exit(1);
   }
}
110
111
112
113 static void age_intel( intelContextPtr intel, int age )
114 {
115 GLuint i;
116
117 for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
118 if (intel->CurrentTexObj[i])
119 intel->CurrentTexObj[i]->age = age;
120 }
121
/**
 * Dump a batchbuffer's contents to stderr for debugging.
 *
 * \param offset  hardware offset of the buffer (used only for labelling).
 * \param ptr     CPU pointer to the buffer contents.
 * \param count   size of the buffer in *bytes*.
 */
void intel_dump_batchbuffer( long offset,
			     int *ptr,
			     int count )
{
   int i;
   /* count is in bytes; the header previously printed the byte count
    * labelled "dwords" -- report the actual dword count instead.
    */
   fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count/4);
   for (i = 0; i < count/4; i += 4)
      fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
	      (unsigned int)offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
   fprintf(stderr, "END BATCH\n\n\n");
}
133
/**
 * Switch to the other half of the double-buffered batch allocation.
 *
 * Emits an IRQ marker for the half being vacated, then waits on the
 * marker recorded the previous time we switched -- by then the hardware
 * has consumed the half we are about to reuse.  Must be called with the
 * hardware lock held; the lock may be dropped around the wait when
 * allow_unlock is set.
 */
void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock )
{
   GLuint last_irq = intel->alloc.irq_emitted;
   GLuint half = intel->alloc.size / 2;
   GLuint buf = (intel->alloc.active_buf ^= 1);

   intel->alloc.irq_emitted = intelEmitIrqLocked( intel );

   if (last_irq) {
      if (allow_unlock) UNLOCK_HARDWARE( intel );
      intelWaitIrq( intel, last_irq );
      if (allow_unlock) LOCK_HARDWARE( intel );
   }

   if (0)
      fprintf(stderr, "%s: now using half %d\n", __FUNCTION__, buf);

   intel->batch.start_offset = intel->alloc.offset + buf * half;
   intel->batch.ptr = (unsigned char *)intel->alloc.ptr + buf * half;
   /* Reserve 8 bytes at the end of the half for the MI_BATCH_BUFFER_END
    * padding appended at flush time.
    */
   intel->batch.size = half - 8;
   intel->batch.space = half - 8;
   assert(intel->batch.space >= 0);
}
157
158 #define MI_BATCH_BUFFER_END (0xA<<23)
159
160
/**
 * Submit the accumulated batch commands to the kernel.
 *
 * \param intel            context; the hardware lock must be held.
 * \param ignore_cliprects submit even when there are zero cliprects.
 * \param refill           also switch to a fresh batch half afterwards.
 * \param allow_unlock     permit temporarily dropping the hardware lock.
 */
void intelFlushBatchLocked( intelContextPtr intel,
			    GLboolean ignore_cliprects,
			    GLboolean refill,
			    GLboolean allow_unlock)
{
   drmI830BatchBuffer batch;

   assert(intel->locked);

   if (0)
      fprintf(stderr, "%s used %d of %d offset %x..%x refill %d (started in %s)\n",
	      __FUNCTION__,
	      (intel->batch.size - intel->batch.space),
	      intel->batch.size,
	      intel->batch.start_offset,
	      intel->batch.start_offset +
	      (intel->batch.size - intel->batch.space),
	      refill,
	      intel->batch.func);

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve statechanges beyond a
    * single buffer.
    */
   if (intel->numClipRects == 0 && !ignore_cliprects) {

      /* Without this yield, an application with no cliprects can hog
       * the hardware.  Without unlocking, the effect is much worse -
       * effectively a lock-out of other contexts.
       */
      if (allow_unlock) {
	 UNLOCK_HARDWARE( intel );
	 sched_yield();
	 LOCK_HARDWARE( intel );
      }

      /* Note that any state thought to have been emitted actually
       * hasn't:
       */
      intel->batch.ptr -= (intel->batch.size - intel->batch.space);
      intel->batch.space = intel->batch.size;
      intel->vtbl.lost_hardware( intel );
   }

   if (intel->batch.space != intel->batch.size) {

      /* Reclaim the hardware context if another client took it since
       * our last submission.
       */
      if (intel->sarea->ctxOwner != intel->hHWContext) {
	 intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
	 intel->sarea->ctxOwner = intel->hHWContext;
      }

      batch.start = intel->batch.start_offset;
      batch.used = intel->batch.size - intel->batch.space;
      batch.cliprects = intel->pClipRects;
      batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
      batch.DR1 = 0;
      /* Drawing-rectangle origin packed as y:x into DR4. */
      batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) |
		   (((GLuint)intel->drawY) << 16));

      if (intel->alloc.offset) {
	 /* Terminate the batch with MI_BATCH_BUFFER_END, padding with a
	  * noop dword first when needed to keep the end qword-aligned.
	  */
	 if ((batch.used & 0x4) == 0) {
	    ((int *)intel->batch.ptr)[0] = 0;
	    ((int *)intel->batch.ptr)[1] = MI_BATCH_BUFFER_END;
	    batch.used += 0x8;
	    intel->batch.ptr += 0x8;
	 }
	 else {
	    ((int *)intel->batch.ptr)[0] = MI_BATCH_BUFFER_END;
	    batch.used += 0x4;
	    intel->batch.ptr += 0x4;
	 }
      }

      if (0)
	 intel_dump_batchbuffer( batch.start,
				 (int *)(intel->batch.ptr - batch.used),
				 batch.used );

      intel->batch.start_offset += batch.used;
      intel->batch.size -= batch.used;

      if (intel->batch.size < 8) {
	 /* Not enough room left for another terminator: force a refill. */
	 refill = GL_TRUE;
	 intel->batch.space = intel->batch.size = 0;
      }
      else {
	 /* Keep 8 bytes in reserve for the next end-of-batch terminator. */
	 intel->batch.size -= 8;
	 intel->batch.space = intel->batch.size;
      }


      assert(intel->batch.space >= 0);
      assert(batch.start >= intel->alloc.offset);
      assert(batch.start < intel->alloc.offset + intel->alloc.size);
      assert(batch.start + batch.used > intel->alloc.offset);
      assert(batch.start + batch.used <=
	     intel->alloc.offset + intel->alloc.size);


      if (intel->alloc.offset) {
	 if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch,
			      sizeof(batch))) {
	    fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
	    UNLOCK_HARDWARE(intel);
	    exit(1);
	 }
      } else {
	 /* No agp offset for the buffer: hand the commands to the kernel
	  * through the copying CMDBUFFER path instead.
	  */
	 drmI830CmdBuffer cmd;
	 cmd.buf = (char *)intel->alloc.ptr + batch.start;
	 cmd.sz = batch.used;
	 cmd.DR1 = batch.DR1;
	 cmd.DR4 = batch.DR4;
	 cmd.num_cliprects = batch.num_cliprects;
	 cmd.cliprects = batch.cliprects;

	 if (drmCommandWrite (intel->driFd, DRM_I830_CMDBUFFER, &cmd,
			      sizeof(cmd))) {
	    fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n", -errno);
	    UNLOCK_HARDWARE(intel);
	    exit(1);
	 }
      }


      age_intel(intel, intel->sarea->last_enqueue);

      /* FIXME: use hardware contexts to avoid 'losing' hardware after
       * each buffer flush.
       */
      if (intel->batch.contains_geometry)
	 assert(intel->batch.last_emit_state == intel->batch.counter);

      intel->batch.counter++;
      intel->batch.contains_geometry = 0;
      intel->batch.func = 0;
      intel->vtbl.lost_hardware( intel );
   }

   if (refill)
      intelRefillBatchLocked( intel, allow_unlock );
}
302
303 void intelFlushBatch( intelContextPtr intel, GLboolean refill )
304 {
305 if (intel->locked) {
306 intelFlushBatchLocked( intel, GL_FALSE, refill, GL_FALSE );
307 }
308 else {
309 LOCK_HARDWARE(intel);
310 intelFlushBatchLocked( intel, GL_FALSE, refill, GL_TRUE );
311 UNLOCK_HARDWARE(intel);
312 }
313 }
314
315
/** Flush outstanding rendering and block until the hardware is idle. */
void intelWaitForIdle( intelContextPtr intel )
{
   if (0)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intel->vtbl.emit_flush( intel );
   intelFlushBatch( intel, GL_TRUE );

   /* Use an irq to wait for dma idle -- Need to track lost contexts
    * to shortcircuit consecutive calls to this function:
    */
   intelWaitIrq( intel, intel->alloc.irq_emitted );
   intel->alloc.irq_emitted = 0;
}
330
331
332 /**
333 * Check if we need to rotate/warp the front color buffer to the
334 * rotated screen. We generally need to do this when we get a glFlush
335 * or glFinish after drawing to the front color buffer.
336 */
337 static void
338 intelCheckFrontRotate(GLcontext *ctx)
339 {
340 intelContextPtr intel = INTEL_CONTEXT( ctx );
341 if (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
342 intelScreenPrivate *screen = intel->intelScreen;
343 if (screen->current_rotation != 0) {
344 __DRIdrawablePrivate *dPriv = intel->driDrawable;
345 intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
346 }
347 }
348 }
349
350
351 /**
352 * NOT directly called via glFlush.
353 */
354 void intelFlush( GLcontext *ctx )
355 {
356 intelContextPtr intel = INTEL_CONTEXT( ctx );
357
358 if (intel->Fallback)
359 _swrast_flush( ctx );
360
361 INTEL_FIREVERTICES( intel );
362
363 if (intel->batch.size != intel->batch.space)
364 intelFlushBatch( intel, GL_FALSE );
365 }
366
367
/**
 * Called via glFlush.  Pushes everything out, then rotates/warps the
 * front buffer if the screen is rotated.
 */
void intelglFlush( GLcontext *ctx )
{
   intelFlush(ctx);
   intelCheckFrontRotate(ctx);
}
376
377
/* Called via glFinish: flush, then block until the hardware is idle. */
void intelFinish( GLcontext *ctx )
{
   intelContextPtr intel = INTEL_CONTEXT( ctx );
   intelFlush( ctx );
   intelWaitForIdle( intel );
   intelCheckFrontRotate(ctx);
}
385
386
387 void intelClear(GLcontext *ctx, GLbitfield mask)
388 {
389 intelContextPtr intel = INTEL_CONTEXT( ctx );
390 const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
391 GLbitfield tri_mask = 0;
392 GLbitfield blit_mask = 0;
393 GLbitfield swrast_mask = 0;
394
395 if (0)
396 fprintf(stderr, "%s\n", __FUNCTION__);
397
398 /* Take care of cliprects, which are handled differently for
399 * clears, etc.
400 */
401 intelFlush( &intel->ctx );
402
403 if (mask & BUFFER_BIT_FRONT_LEFT) {
404 if (colorMask == ~0) {
405 blit_mask |= BUFFER_BIT_FRONT_LEFT;
406 }
407 else {
408 tri_mask |= BUFFER_BIT_FRONT_LEFT;
409 }
410 }
411
412 if (mask & BUFFER_BIT_BACK_LEFT) {
413 if (colorMask == ~0) {
414 blit_mask |= BUFFER_BIT_BACK_LEFT;
415 }
416 else {
417 tri_mask |= BUFFER_BIT_BACK_LEFT;
418 }
419 }
420
421 if (mask & BUFFER_BIT_DEPTH) {
422 blit_mask |= BUFFER_BIT_DEPTH;
423 }
424
425 if (mask & BUFFER_BIT_STENCIL) {
426 if (!intel->hw_stencil) {
427 swrast_mask |= BUFFER_BIT_STENCIL;
428 }
429 else if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
430 tri_mask |= BUFFER_BIT_STENCIL;
431 }
432 else {
433 blit_mask |= BUFFER_BIT_STENCIL;
434 }
435 }
436
437 swrast_mask |= (mask & BUFFER_BIT_ACCUM);
438
439 if (blit_mask)
440 intelClearWithBlit( ctx, blit_mask, 0, 0, 0, 0, 0);
441
442 if (tri_mask)
443 intel->vtbl.clear_with_tris( intel, tri_mask, 0, 0, 0, 0, 0);
444
445 if (swrast_mask)
446 _swrast_Clear( ctx, swrast_mask );
447 }
448
449
450 void
451 intelRotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
452 GLuint srcBuffer)
453 {
454 if (intel->vtbl.rotate_window) {
455 intel->vtbl.rotate_window(intel, dPriv, srcBuffer);
456 }
457 }
458
459
/**
 * Allocate \p size bytes of agp memory from the kernel's global heap.
 *
 * Takes the hardware lock for the duration of the ioctl.
 *
 * \return a CPU pointer into the screen's agp texture map, or NULL on
 *         allocation failure.
 */
void *intelAllocateAGP( intelContextPtr intel, GLsizei size )
{
   int region_offset;
   drmI830MemAlloc alloc;
   int ret;

   if (0)
      fprintf(stderr, "%s: %d bytes\n", __FUNCTION__, size);

   alloc.region = I830_MEM_REGION_AGP;
   alloc.alignment = 0;
   alloc.size = size;
   alloc.region_offset = &region_offset;

   LOCK_HARDWARE(intel);

   /* Make sure the global heap is initialized
    */
   if (intel->texture_heaps[0])
      driAgeTextures( intel->texture_heaps[0] );


   ret = drmCommandWriteRead( intel->driFd,
			      DRM_I830_ALLOC,
			      &alloc, sizeof(alloc));

   if (ret) {
      fprintf(stderr, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__, ret);
      UNLOCK_HARDWARE(intel);
      return NULL;
   }

   if (0)
      fprintf(stderr, "%s: allocated %d bytes\n", __FUNCTION__, size);

   /* Need to propagate this information (agp memory in use) to our
    * local texture lru.  The kernel has already updated the global
    * lru.  An alternative would have been to allocate memory the
    * usual way and then notify the kernel to pin the allocation.
    */
   if (intel->texture_heaps[0])
      driAgeTextures( intel->texture_heaps[0] );

   UNLOCK_HARDWARE(intel);

   /* The kernel returned an offset within the agp region; translate it
    * into a CPU pointer via the screen's texture map.
    */
   return (void *)((char *)intel->intelScreen->tex.map + region_offset);
}
507
/**
 * Return agp memory obtained from intelAllocateAGP() to the kernel's
 * global heap.  Pointers outside the agp texture map are rejected with
 * a diagnostic rather than passed to the kernel.
 */
void intelFreeAGP( intelContextPtr intel, void *pointer )
{
   int region_offset;
   drmI830MemFree memfree;
   int ret;

   region_offset = (char *)pointer - (char *)intel->intelScreen->tex.map;

   /* NOTE(review): the upper bound uses '>' so an offset exactly equal
    * to tex.size passes -- possibly should be '>='; confirm intended
    * semantics before changing.
    */
   if (region_offset < 0 ||
       region_offset > intel->intelScreen->tex.size) {
      fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
	      intel->intelScreen->tex.size);
      return;
   }

   memfree.region = I830_MEM_REGION_AGP;
   memfree.region_offset = region_offset;

   ret = drmCommandWrite( intel->driFd,
			  DRM_I830_FREE,
			  &memfree, sizeof(memfree));

   if (ret)
      fprintf(stderr, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__, ret);
}
533
534 /* This version of AllocateMemoryMESA allocates only agp memory, and
535 * only does so after the point at which the driver has been
536 * initialized.
537 *
538 * Theoretically a valid context isn't required. However, in this
539 * implementation, it is, as I'm using the hardware lock to protect
540 * the kernel data structures, and the current context to get the
541 * device fd.
542 */
543 void *intelAllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn,
544 GLsizei size, GLfloat readfreq,
545 GLfloat writefreq, GLfloat priority)
546 {
547 GET_CURRENT_CONTEXT(ctx);
548
549 if (INTEL_DEBUG & DEBUG_IOCTL)
550 fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
551 writefreq, priority);
552
553 if (getenv("INTEL_NO_ALLOC"))
554 return NULL;
555
556 if (!ctx || INTEL_CONTEXT(ctx) == 0)
557 return NULL;
558
559 return intelAllocateAGP( INTEL_CONTEXT(ctx), size );
560 }
561
562
563 /* Called via glXFreeMemoryMESA() */
564 void intelFreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
565 {
566 GET_CURRENT_CONTEXT(ctx);
567 if (INTEL_DEBUG & DEBUG_IOCTL)
568 fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
569
570 if (!ctx || INTEL_CONTEXT(ctx) == 0) {
571 fprintf(stderr, "%s: no context\n", __FUNCTION__);
572 return;
573 }
574
575 intelFreeAGP( INTEL_CONTEXT(ctx), pointer );
576 }
577
578 /* Called via glXGetMemoryOffsetMESA()
579 *
580 * Returns offset of pointer from the start of agp aperture.
581 */
582 GLuint intelGetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn,
583 const GLvoid *pointer)
584 {
585 GET_CURRENT_CONTEXT(ctx);
586 intelContextPtr intel;
587
588 if (!ctx || !(intel = INTEL_CONTEXT(ctx)) ) {
589 fprintf(stderr, "%s: no context\n", __FUNCTION__);
590 return ~0;
591 }
592
593 if (!intelIsAgpMemory( intel, pointer, 0 ))
594 return ~0;
595
596 return intelAgpOffsetFromVirtual( intel, pointer );
597 }
598
599
/**
 * Check whether [pointer, pointer+size) lies within the screen's agp
 * texture region.
 */
GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
			    GLint size )
{
   int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
   /* NOTE(review): the upper bound uses '<' rather than '<=', so a range
    * ending exactly at tex.size is rejected -- possibly an off-by-one;
    * confirm intended semantics before changing.
    */
   int valid = (size >= 0 &&
		offset >= 0 &&
		offset + size < intel->intelScreen->tex.size);

   if (INTEL_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "intelIsAgpMemory( %p ) : %d\n", pointer, valid );

   return valid;
}
613
614
615 GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *pointer )
616 {
617 int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
618
619 if (offset < 0 || offset > intel->intelScreen->tex.size)
620 return ~0;
621 else
622 return intel->intelScreen->tex.offset + offset;
623 }
624
625
626
627
628
/* Flip the front & back buffers.
 *
 * NOTE: the entire implementation is currently compiled out (#if 0),
 * so this function is a no-op.  The disabled body is kept for
 * reference; note it calls intelRefillBatchLocked() with a stale
 * single-argument signature and would need updating before re-enabling.
 */
void intelPageFlip( const __DRIdrawablePrivate *dPriv )
{
#if 0
   intelContextPtr intel;
   int tmp, ret;

   if (INTEL_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (dPriv->pClipRects) {
      *(drm_clip_rect_t *)intel->sarea->boxes = dPriv->pClipRects[0];
      intel->sarea->nbox = 1;
   }

   ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
   if (ret) {
      fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
      UNLOCK_HARDWARE( intel );
      exit(1);
   }

   tmp = intel->sarea->last_enqueue;
   intelRefillBatchLocked( intel );
   UNLOCK_HARDWARE( intel );


   intelSetDrawBuffer( &intel->ctx, intel->ctx.Color.DriverDrawBuffer );
#endif
}