dec0166605b04b19bb4ce4de0e279a15097c8e92
[mesa.git] / src / mesa / drivers / dri / i915 / intel_ioctl.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include <stdio.h>
30 #include <unistd.h>
31 #include <errno.h>
32
33 #include "mtypes.h"
34 #include "context.h"
35 #include "swrast/swrast.h"
36
37 #include "intel_context.h"
38 #include "intel_ioctl.h"
39 #include "intel_batchbuffer.h"
40 #include "drm.h"
41
42
43
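/* Ask the kernel to emit an interrupt behind the commands queued so
 * far and return its sequence number.  Must be called with the
 * hardware lock held (checked by the assert below); the sequence can
 * later be passed to intelWaitIrq() to find out when the hardware has
 * passed that point.
 */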
44 static int intelEmitIrqLocked( intelContextPtr intel )
45 {
46 drmI830IrqEmit ie;
47 int ret, seq;
48
49 assert(((*(int *)intel->driHwLock) & ~DRM_LOCK_CONT) ==
50 (DRM_LOCK_HELD|intel->hHWContext));
51
52 ie.irq_seq = &seq;
53
54 ret = drmCommandWriteRead( intel->driFd, DRM_I830_IRQ_EMIT,
55 &ie, sizeof(ie) );
56 if ( ret ) {
57 fprintf( stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret );
58 exit(1);
59 }
60
61 if (0)
62 fprintf(stderr, "%s --> %d\n", __FUNCTION__, seq );
63
64 return seq;
65 }
66
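/* Block until the interrupt with the given sequence number (from
 * intelEmitIrqLocked) has been signalled, retrying if the wait is
 * interrupted or would block.
 */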
67 static void intelWaitIrq( intelContextPtr intel, int seq )
68 {
69 drmI830IrqWait iw;
70 int ret;
71
72 if (0)
73 fprintf(stderr, "%s %d\n", __FUNCTION__, seq );
74
75 iw.irq_seq = seq;
76
77 do {
78 ret = drmCommandWrite( intel->driFd, DRM_I830_IRQ_WAIT, &iw, sizeof(iw) );
79 } while (ret == -EAGAIN || ret == -EINTR);
80
81 if ( ret ) {
82 fprintf( stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret );
83 if (0)
84 intel_dump_batchbuffer( intel->alloc.offset,
85 intel->alloc.ptr,
86 intel->alloc.size );
87 exit(1);
88 }
89 }
90
91
92
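/* Stamp the currently bound texture objects with the given age
 * (normally sarea->last_enqueue) so the texture manager can tell how
 * recently the hardware referenced them.
 */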
93 static void age_intel( intelContextPtr intel, int age )
94 {
95 GLuint i;
96
97 for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
98 if (intel->CurrentTexObj[i])
99 intel->CurrentTexObj[i]->age = age;
100 }
101
102 void intel_dump_batchbuffer( long offset,
103 int *ptr,
104 int count )
105 {
106 int i;
107 fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);

108 for (i = 0; i < count/4; i += 4)
109 fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
110 offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
111 fprintf(stderr, "END BATCH\n\n\n");
112 }
113
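/* Switch to the other half of the double-buffered batch allocation.
 * An irq is emitted now, and we wait on the irq emitted at the
 * previous switch; that one lies behind every command that used the
 * half we are about to reuse, so the hardware is done with it.  The
 * last 8 bytes of each half are reserved for the batch terminator.
 */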
114 void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock )
115 {
116 GLuint last_irq = intel->alloc.irq_emitted;
117 GLuint half = intel->alloc.size / 2;
118 GLuint buf = (intel->alloc.active_buf ^= 1);
119
120 intel->alloc.irq_emitted = intelEmitIrqLocked( intel );
121
122 if (last_irq) {
123 if (allow_unlock) UNLOCK_HARDWARE( intel );
124 intelWaitIrq( intel, last_irq );
125 if (allow_unlock) LOCK_HARDWARE( intel );
126 }
127
128 if (0)
129 fprintf(stderr, "%s: now using half %d\n", __FUNCTION__, buf);
130
131 intel->batch.start_offset = intel->alloc.offset + buf * half;
132 intel->batch.ptr = (char *)intel->alloc.ptr + buf * half;
133 intel->batch.size = half - 8;
134 intel->batch.space = half - 8;
135 assert(intel->batch.space >= 0);
136 }
137
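/* MI instruction (bits 31:29 == 0) with opcode 0x0A in bits 28:23:
 * terminates a batch buffer.
 */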
138 #define MI_BATCH_BUFFER_END (0xA<<23)
139
140
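/* Submit the commands accumulated in the current batch.  When there
 * are no cliprects (and they are not being ignored) the packets are
 * simply thrown away and state emission is retried later.  Otherwise
 * the batch is terminated with MI_BATCH_BUFFER_END, padded to a qword
 * boundary, and submitted with DRM_I830_BATCHBUFFER; if there is no
 * AGP batch allocation it is handed to the kernel with
 * DRM_I830_CMDBUFFER instead (no terminator needed in that case).
 */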
141 void intelFlushBatchLocked( intelContextPtr intel,
142 GLboolean ignore_cliprects,
143 GLboolean refill,
144 GLboolean allow_unlock)
145 {
146 drmI830BatchBuffer batch;
147
148 assert(intel->locked);
149
150 if (0)
151 fprintf(stderr, "%s used %d of %d offset %x..%x refill %d\n",
152 __FUNCTION__,
153 (intel->batch.size - intel->batch.space),
154 intel->batch.size,
155 intel->batch.start_offset,
156 intel->batch.start_offset +
157 (intel->batch.size - intel->batch.space),
158 refill);
159
160 /* Throw away non-effective packets. Won't work once we have
161 * hardware contexts which would preserve state changes beyond a
162 * single buffer.
163 */
164 if (intel->numClipRects == 0 && !ignore_cliprects) {
165
166 /* Without this yield, an application with no cliprects can hog
167 * the hardware. Without unlocking, the effect is much worse -
168 * effectively a lock-out of other contexts.
169 */
170 if (allow_unlock) {
171 UNLOCK_HARDWARE( intel );
172 sched_yield();
173 LOCK_HARDWARE( intel );
174 }
175
176 /* Note that any state thought to have been emitted actually
177 * hasn't:
178 */
179 intel->batch.ptr -= (intel->batch.size - intel->batch.space);
180 intel->batch.space = intel->batch.size;
181 intel->vtbl.lost_hardware( intel );
182 }
183
184 if (intel->batch.space != intel->batch.size) {
185 batch.start = intel->batch.start_offset;
186 batch.used = intel->batch.size - intel->batch.space;
187 batch.cliprects = intel->pClipRects;
188 batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
189 batch.DR1 = 0;
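/* Pack the drawable origin into DR4: X in the low 16 bits, Y in the
 * high 16 bits.
 */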
190 batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) |
191 (((GLuint)intel->drawY) << 16));
192
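/* The batch must end with MI_BATCH_BUFFER_END and be padded to an
 * even number of dwords, so emit a NOOP dword first when the buffer
 * currently ends on an odd dword boundary.
 */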
193 if (intel->alloc.offset) {
194 if ((batch.used & 0x4) == 0) {
195 ((int *)intel->batch.ptr)[0] = 0;
196 ((int *)intel->batch.ptr)[1] = MI_BATCH_BUFFER_END;
197 batch.used += 0x8;
198 intel->batch.ptr += 0x8;
199 }
200 else {
201 ((int *)intel->batch.ptr)[0] = MI_BATCH_BUFFER_END;
202 batch.used += 0x4;
203 intel->batch.ptr += 0x4;
204 }
205 }
206
207 if (0)
208 intel_dump_batchbuffer( batch.start,
209 (int *)(intel->batch.ptr - batch.used),
210 batch.used );
211
212 if (0)
213 fprintf(stderr, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
214 __FUNCTION__,
215 batch.start,
216 batch.start + batch.used,
217 batch.DR4, batch.num_cliprects);
218
219 intel->batch.start_offset += batch.used;
220 intel->batch.size -= batch.used;
221
222 if (intel->batch.size < 8) {
223 refill = GL_TRUE;
224 intel->batch.space = intel->batch.size = 0;
225 }
226 else {
227 intel->batch.size -= 8;
228 intel->batch.space = intel->batch.size;
229 }
230
231
232 assert(intel->batch.space >= 0);
233 assert(batch.start >= intel->alloc.offset);
234 assert(batch.start < intel->alloc.offset + intel->alloc.size);
235 assert(batch.start + batch.used > intel->alloc.offset);
236 assert(batch.start + batch.used <=
237 intel->alloc.offset + intel->alloc.size);
238
239
240 if (intel->alloc.offset) {
241 if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch,
242 sizeof(batch))) {
243 fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
244 UNLOCK_HARDWARE(intel);
245 exit(1);
246 }
247 } else {
248 drmI830CmdBuffer cmd;
249 cmd.buf = intel->alloc.ptr + batch.start;
250 cmd.sz = batch.used;
251 cmd.DR1 = batch.DR1;
252 cmd.DR4 = batch.DR4;
253 cmd.num_cliprects = batch.num_cliprects;
254 cmd.cliprects = batch.cliprects;
255
256 if (drmCommandWrite (intel->driFd, DRM_I830_CMDBUFFER, &cmd,
257 sizeof(cmd))) {
258 fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n", -errno);
259 UNLOCK_HARDWARE(intel);
260 exit(1);
261 }
262 }
263
264
265 age_intel(intel, intel->sarea->last_enqueue);
266
267 /* FIXME: use hardware contexts to avoid 'losing' hardware after
268 * each buffer flush.
269 */
270 intel->vtbl.lost_hardware( intel );
271 }
272
273 if (refill)
274 intelRefillBatchLocked( intel, allow_unlock );
275 }
276
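/* Flush the current batch, taking and releasing the hardware lock if
 * the caller does not already hold it.
 */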
277 void intelFlushBatch( intelContextPtr intel, GLboolean refill )
278 {
279 if (intel->locked) {
280 intelFlushBatchLocked( intel, GL_FALSE, refill, GL_FALSE );
281 }
282 else {
283 LOCK_HARDWARE(intel);
284 intelFlushBatchLocked( intel, GL_FALSE, refill, GL_TRUE );
285 UNLOCK_HARDWARE(intel);
286 }
287 }
288
289
290
291
292
293
294
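/* Flush any pending rendering and wait for the resulting irq, so all
 * queued dma has been processed by the hardware on return.
 */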
295 void intelWaitForIdle( intelContextPtr intel )
296 {
297 if (0)
298 fprintf(stderr, "%s\n", __FUNCTION__);
299
300 intel->vtbl.emit_flush( intel );
301 intelFlushBatch( intel, GL_TRUE );
302
303 /* Use an irq to wait for dma idle -- need to track lost contexts
304 * to short-circuit consecutive calls to this function:
305 */
306 intelWaitIrq( intel, intel->alloc.irq_emitted );
307 intel->alloc.irq_emitted = 0;
308 }
309
310
311
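/* Called for glFlush: flush any software fallback rendering, fire
 * queued vertices and submit the current batch if it holds commands.
 */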
312 void intelFlush( GLcontext *ctx )
313 {
314 intelContextPtr intel = INTEL_CONTEXT( ctx );
315
316 if (intel->Fallback)
317 _swrast_flush( ctx );
318
319 INTEL_FIREVERTICES( intel );
320
321 if (intel->batch.size != intel->batch.space)
322 intelFlushBatch( intel, GL_FALSE );
323 }
324
325 void intelFinish( GLcontext *ctx )
326 {
327 intelContextPtr intel = INTEL_CONTEXT( ctx );
328 intelFlush( ctx );
329 intelWaitForIdle( intel );
330 }
331
332
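/* Triage the buffers to be cleared: full-colormask color clears and
 * depth go to the blitter, partially masked color or stencil clears
 * are done with triangles, and anything the hardware cannot handle
 * (accum, stencil without hw support) falls back to swrast.
 */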
333 void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
334 GLint cx, GLint cy, GLint cw, GLint ch)
335 {
336 intelContextPtr intel = INTEL_CONTEXT( ctx );
337 const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
338 GLbitfield tri_mask = 0;
339 GLbitfield blit_mask = 0;
340 GLbitfield swrast_mask = 0;
341
342 if (0)
343 fprintf(stderr, "%s\n", __FUNCTION__);
344
345 /* Take care of cliprects, which are handled differently for
346 * clears, etc.
347 */
348 intelFlush( &intel->ctx );
349
350 if (mask & DD_FRONT_LEFT_BIT) {
351 if (colorMask == ~0) {
352 blit_mask |= DD_FRONT_LEFT_BIT;
353 }
354 else {
355 tri_mask |= DD_FRONT_LEFT_BIT;
356 }
357 }
358
359 if (mask & DD_BACK_LEFT_BIT) {
360 if (colorMask == ~0) {
361 blit_mask |= DD_BACK_LEFT_BIT;
362 }
363 else {
364 tri_mask |= DD_BACK_LEFT_BIT;
365 }
366 }
367
368 if (mask & DD_DEPTH_BIT) {
369 blit_mask |= DD_DEPTH_BIT;
370 }
371
372 if (mask & DD_STENCIL_BIT) {
373 if (!intel->hw_stencil) {
374 swrast_mask |= DD_STENCIL_BIT;
375 }
376 else if (ctx->Stencil.WriteMask[0] != 0xff) {
377 tri_mask |= DD_STENCIL_BIT;
378 }
379 else {
380 blit_mask |= DD_STENCIL_BIT;
381 }
382 }
383
384 swrast_mask |= (mask & DD_ACCUM_BIT);
385
386 if (blit_mask)
387 intelClearWithBlit( ctx, blit_mask, all, cx, cy, cw, ch );
388
389 if (tri_mask)
390 intel->vtbl.clear_with_tris( intel, tri_mask, all, cx, cy, cw, ch);
391
392 if (swrast_mask)
393 _swrast_Clear( ctx, swrast_mask, all, cx, cy, cw, ch );
394 }
395
396
397
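/* Allocate "size" bytes from the kernel's AGP texture heap with
 * DRM_I830_ALLOC and return a pointer into the mapped texture region.
 * driAgeTextures() keeps the local texture LRU in step with the
 * kernel's view of the heap.
 */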
398 void *intelAllocateAGP( intelContextPtr intel, GLsizei size )
399 {
400 int region_offset;
401 drmI830MemAlloc alloc;
402 int ret;
403
404 if (0)
405 fprintf(stderr, "%s: %d bytes\n", __FUNCTION__, size);
406
407 alloc.region = I830_MEM_REGION_AGP;
408 alloc.alignment = 0;
409 alloc.size = size;
410 alloc.region_offset = &region_offset;
411
412 LOCK_HARDWARE(intel);
413
414 /* Make sure the global heap is initialized
415 */
416 if (intel->texture_heaps[0])
417 driAgeTextures( intel->texture_heaps[0] );
418
419
420 ret = drmCommandWriteRead( intel->driFd,
421 DRM_I830_ALLOC,
422 &alloc, sizeof(alloc));
423
424 if (ret) {
425 fprintf(stderr, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__, ret);
426 UNLOCK_HARDWARE(intel);
427 return NULL;
428 }
429
430 if (0)
431 fprintf(stderr, "%s: allocated %d bytes\n", __FUNCTION__, size);
432
433 /* Need to propagate this information (agp memory in use) to our
434 * local texture lru. The kernel has already updated the global
435 * lru. An alternative would have been to allocate memory the
436 * usual way and then notify the kernel to pin the allocation.
437 */
438 if (intel->texture_heaps[0])
439 driAgeTextures( intel->texture_heaps[0] );
440
441 UNLOCK_HARDWARE(intel);
442
443 return (void *)((char *)intel->intelScreen->tex.map + region_offset);
444 }
445
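/* Return memory obtained from intelAllocateAGP() to the kernel heap
 * via DRM_I830_FREE.
 */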
446 void intelFreeAGP( intelContextPtr intel, void *pointer )
447 {
448 int region_offset;
449 drmI830MemFree memfree;
450 int ret;
451
452 region_offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
453
454 if (region_offset < 0 ||
455 region_offset > intel->intelScreen->tex.size) {
456 fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
457 intel->intelScreen->tex.size);
458 return;
459 }
460
461 memfree.region = I830_MEM_REGION_AGP;
462 memfree.region_offset = region_offset;
463
464 ret = drmCommandWrite( intel->driFd,
465 DRM_I830_FREE,
466 &memfree, sizeof(memfree));
467
468 if (ret)
469 fprintf(stderr, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__, ret);
470 }
471
472 /* This version of AllocateMemoryMESA allocates only agp memory, and
473 * only does so after the point at which the driver has been
474 * initialized.
475 *
476 * Theoretically a valid context isn't required. However, in this
477 * implementation, it is, as I'm using the hardware lock to protect
478 * the kernel data structures, and the current context to get the
479 * device fd.
480 */
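/* Applications normally reach this through the GLX_MESA_allocate_memory
 * extension; roughly (illustrative only):
 *
 *    void *buf = glXAllocateMemoryMESA(dpy, scrn, size,
 *                                      0.0f, 0.0f, 1.0f);
 *    ...
 *    glXFreeMemoryMESA(dpy, scrn, buf);
 */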
481 void *intelAllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn,
482 GLsizei size, GLfloat readfreq,
483 GLfloat writefreq, GLfloat priority)
484 {
485 GET_CURRENT_CONTEXT(ctx);
486
487 if (INTEL_DEBUG & DEBUG_IOCTL)
488 fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
489 writefreq, priority);
490
491 if (getenv("INTEL_NO_ALLOC"))
492 return NULL;
493
494 if (!ctx || INTEL_CONTEXT(ctx) == 0)
495 return NULL;
496
497 return intelAllocateAGP( INTEL_CONTEXT(ctx), size );
498 }
499
500
501 /* Called via glXFreeMemoryMESA() */
502 void intelFreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
503 {
504 GET_CURRENT_CONTEXT(ctx);
505 if (INTEL_DEBUG & DEBUG_IOCTL)
506 fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
507
508 if (!ctx || INTEL_CONTEXT(ctx) == 0) {
509 fprintf(stderr, "%s: no context\n", __FUNCTION__);
510 return;
511 }
512
513 intelFreeAGP( INTEL_CONTEXT(ctx), pointer );
514 }
515
516 /* Called via glXGetMemoryOffsetMESA()
517 *
518 * Returns the offset of the pointer from the start of the agp aperture.
519 */
520 GLuint intelGetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn,
521 const GLvoid *pointer)
522 {
523 GET_CURRENT_CONTEXT(ctx);
524 intelContextPtr intel;
525
526 if (!ctx || !(intel = INTEL_CONTEXT(ctx)) ) {
527 fprintf(stderr, "%s: no context\n", __FUNCTION__);
528 return ~0;
529 }
530
531 if (!intelIsAgpMemory( intel, pointer, 0 ))
532 return ~0;
533
534 return intelAgpOffsetFromVirtual( intel, pointer );
535 }
536
537
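/* Return true if [pointer, pointer+size) lies entirely within the
 * mapped AGP texture region.
 */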
538 GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
539 GLint size )
540 {
541 int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
542 int valid = (size >= 0 &&
543 offset >= 0 &&
544 offset + size < intel->intelScreen->tex.size);
545
546 if (INTEL_DEBUG & DEBUG_IOCTL)
547 fprintf(stderr, "intelIsAgpMemory( %p ) : %d\n", pointer, valid );
548
549 return valid;
550 }
551
552
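/* Translate a CPU pointer within the texture map to an offset from
 * the start of the agp aperture, or ~0 if it is out of range.
 */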
553 GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *pointer )
554 {
555 int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
556
557 if (offset < 0 || offset > intel->intelScreen->tex.size)
558 return ~0;
559 else
560 return intel->intelScreen->textureOffset + offset;
561 }
562
563
564
565
566
567 /* Flip the front & back buffers
568 */
569 void intelPageFlip( const __DRIdrawablePrivate *dPriv )
570 {
571 #if 0
572 intelContextPtr intel;
573 int tmp, ret;
574
575 if (INTEL_DEBUG & DEBUG_IOCTL)
576 fprintf(stderr, "%s\n", __FUNCTION__);
577
578 assert(dPriv);
579 assert(dPriv->driContextPriv);
580 assert(dPriv->driContextPriv->driverPrivate);
581
582 intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;
583
584 intelFlush( &intel->ctx );
585 LOCK_HARDWARE( intel );
586
587 if (dPriv->pClipRects) {
588 *(drm_clip_rect_t *)intel->sarea->boxes = dPriv->pClipRects[0];
589 intel->sarea->nbox = 1;
590 }
591
592 ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
593 if (ret) {
594 fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
595 UNLOCK_HARDWARE( intel );
596 exit(1);
597 }
598
599 tmp = intel->sarea->last_enqueue;
600 intelRefillBatchLocked( intel, GL_TRUE );
601 UNLOCK_HARDWARE( intel );
602
603
604 intelSetDrawBuffer( &intel->ctx, intel->ctx.Color.DriverDrawBuffer );
605 #endif
606 }