/*
 * Copyright (C) 2005 Aapo Tahkola.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Aapo Tahkola <aet@rasterburn.org>
 */
#include "r300_context.h"
#include "r300_cmdbuf.h"
#include "r300_ioctl.h"
#include "radeon_mm.h"
#include "radeon_ioctl.h"
42 static void resize_u_list(r300ContextPtr rmesa
)
47 temp
= rmesa
->rmm
->u_list
;
48 nsize
= rmesa
->rmm
->u_size
* 2;
50 rmesa
->rmm
->u_list
= _mesa_malloc(nsize
* sizeof(*rmesa
->rmm
->u_list
));
51 _mesa_memset(rmesa
->rmm
->u_list
, 0, nsize
* sizeof(*rmesa
->rmm
->u_list
));
54 r300FlushCmdBuf(rmesa
, __FUNCTION__
);
56 _mesa_memcpy(rmesa
->rmm
->u_list
, temp
, rmesa
->rmm
->u_size
* sizeof(*rmesa
->rmm
->u_list
));
60 rmesa
->rmm
->u_size
= nsize
;
63 void radeon_mm_init(r300ContextPtr rmesa
)
65 rmesa
->rmm
= malloc(sizeof(struct radeon_memory_manager
));
66 memset(rmesa
->rmm
, 0, sizeof(struct radeon_memory_manager
));
68 rmesa
->rmm
->u_size
= 128;
72 void *radeon_mm_ptr(r300ContextPtr rmesa
, int id
)
74 assert(id
<= rmesa
->rmm
->u_last
);
75 return rmesa
->rmm
->u_list
[id
].ptr
;
78 int radeon_mm_find(r300ContextPtr rmesa
, void *ptr
)
82 for (i
=1; i
< rmesa
->rmm
->u_size
+1; i
++)
83 if(rmesa
->rmm
->u_list
[i
].ptr
&&
84 ptr
>= rmesa
->rmm
->u_list
[i
].ptr
&&
85 ptr
< rmesa
->rmm
->u_list
[i
].ptr
+ rmesa
->rmm
->u_list
[i
].size
)
88 if (i
< rmesa
->rmm
->u_size
+ 1)
91 fprintf(stderr
, "%p failed\n", ptr
);
96 int radeon_mm_alloc(r300ContextPtr rmesa
, int alignment
, int size
)
98 drm_radeon_mem_alloc_t alloc
;
102 drm_radeon_mem_free_t memfree
;
104 static int bytes_wasted
=0, allocated
=0;
107 bytes_wasted
+= 4096 - size
;
113 if (t
!= time(NULL
)) {
115 fprintf(stderr
, "slots used %d, wasted %d kb, allocated %d\n", rmesa
->rmm
->u_last
, bytes_wasted
/1024, allocated
/1024);
119 memfree
.region
= RADEON_MEM_REGION_GART
;
123 done_age
= radeonGetAge((radeonContextPtr
)rmesa
);
125 if (rmesa
->rmm
->u_last
+ 1 >= rmesa
->rmm
->u_size
)
126 resize_u_list(rmesa
);
128 for (i
= rmesa
->rmm
->u_last
+ 1; i
> 0; i
--) {
129 if (rmesa
->rmm
->u_list
[i
].ptr
== NULL
) {
134 if (rmesa
->rmm
->u_list
[i
].h_pending
== 0 &&
135 rmesa
->rmm
->u_list
[i
].pending
&& rmesa
->rmm
->u_list
[i
].age
<= done_age
) {
136 memfree
.region_offset
= (char *)rmesa
->rmm
->u_list
[i
].ptr
-
137 (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
139 ret
= drmCommandWrite(rmesa
->radeon
.radeonScreen
->driScreen
->fd
,
140 DRM_RADEON_FREE
, &memfree
, sizeof(memfree
));
143 fprintf(stderr
, "Failed to free at %p\n", rmesa
->rmm
->u_list
[i
].ptr
);
144 fprintf(stderr
, "ret = %s\n", strerror(-ret
));
148 fprintf(stderr
, "really freed %d at age %x\n", i
, radeonGetAge((radeonContextPtr
)rmesa
));
150 if (i
== rmesa
->rmm
->u_last
)
151 rmesa
->rmm
->u_last
--;
153 if(rmesa
->rmm
->u_list
[i
].size
< 4096)
154 bytes_wasted
-= 4096 - rmesa
->rmm
->u_list
[i
].size
;
156 allocated
-= rmesa
->rmm
->u_list
[i
].size
;
157 rmesa
->rmm
->u_list
[i
].pending
= 0;
158 rmesa
->rmm
->u_list
[i
].ptr
= NULL
;
160 if (rmesa
->rmm
->u_list
[i
].fb
) {
161 LOCK_HARDWARE(&(rmesa
->radeon
));
162 ret
= mmFreeMem(rmesa
->rmm
->u_list
[i
].fb
);
163 UNLOCK_HARDWARE(&(rmesa
->radeon
));
166 fprintf(stderr
, "failed to free!\n");
167 rmesa
->rmm
->u_list
[i
].fb
= NULL
;
169 rmesa
->rmm
->u_list
[i
].ref_count
= 0;
174 rmesa
->rmm
->u_head
= i
;
177 WARN_ONCE("Ran out of slots!\n");
179 r300FlushCmdBuf(rmesa
, __FUNCTION__
);
182 WARN_ONCE("Ran out of slots!\n");
188 alloc
.region
= RADEON_MEM_REGION_GART
;
189 alloc
.alignment
= alignment
;
191 alloc
.region_offset
= &offset
;
193 ret
= drmCommandWriteRead( rmesa
->radeon
.dri
.fd
, DRM_RADEON_ALLOC
, &alloc
, sizeof(alloc
));
196 WARN_ONCE("Ran out of mem!\n");
197 r300FlushCmdBuf(rmesa
, __FUNCTION__
);
202 WARN_ONCE("Ran out of GART memory!\n");
207 WARN_ONCE("Ran out of GART memory!\nPlease consider adjusting GARTSize option.\n");
214 if (i
> rmesa
->rmm
->u_last
)
215 rmesa
->rmm
->u_last
= i
;
217 rmesa
->rmm
->u_list
[i
].ptr
= ((GLubyte
*)rmesa
->radeon
.radeonScreen
->gartTextures
.map
) + offset
;
218 rmesa
->rmm
->u_list
[i
].size
= size
;
219 rmesa
->rmm
->u_list
[i
].age
= 0;
220 rmesa
->rmm
->u_list
[i
].fb
= NULL
;
221 //fprintf(stderr, "alloc %p at id %d\n", rmesa->rmm->u_list[i].ptr, i);
224 fprintf(stderr
, "allocated %d at age %x\n", i
, radeonGetAge((radeonContextPtr
)rmesa
));
#include "r300_emit.h"
231 static void emit_lin_cp(r300ContextPtr rmesa
, unsigned long dst
, unsigned long src
, unsigned long size
)
239 if(cp_size
> /*8190*/4096)
240 cp_size
= /*8190*/4096;
265 start_packet3(RADEON_CP_PACKET3_UNK1B
, 2);
268 e32(cp_size
<< 16 | 0x1);
285 void radeon_mm_use(r300ContextPtr rmesa
, int id
)
289 fprintf(stderr
, "%s: %d at age %x\n", __FUNCTION__
, id
, radeonGetAge((radeonContextPtr
)rmesa
));
291 drm_r300_cmd_header_t
*cmd
;
293 assert(id
<= rmesa
->rmm
->u_last
);
298 #if 0 /* FB VBOs. Needs further changes... */
299 rmesa
->rmm
->u_list
[id
].ref_count
++;
300 if (rmesa
->rmm
->u_list
[id
].ref_count
> 100 && rmesa
->rmm
->u_list
[id
].fb
== NULL
&&
301 rmesa
->rmm
->u_list
[id
].size
!= RADEON_BUFFER_SIZE
*16 /*&& rmesa->rmm->u_list[id].size > 40*/) {
303 struct mem_block
*mb
;
305 LOCK_HARDWARE(&(rmesa
->radeon
));
307 heap
= rmesa
->texture_heaps
[0];
309 mb
= mmAllocMem(heap
->memory_heap
, rmesa
->rmm
->u_list
[id
].size
, heap
->alignmentShift
, 0);
311 UNLOCK_HARDWARE(&(rmesa
->radeon
));
314 rmesa
->rmm
->u_list
[id
].fb
= mb
;
316 emit_lin_cp(rmesa
, rmesa
->radeon
.radeonScreen
->texOffset
[0] + rmesa
->rmm
->u_list
[id
].fb
->ofs
,
317 r300GartOffsetFromVirtual(rmesa
, rmesa
->rmm
->u_list
[id
].ptr
),
318 rmesa
->rmm
->u_list
[id
].size
);
320 WARN_ONCE("Upload to fb failed, %d, %d\n", rmesa
->rmm
->u_list
[id
].size
, id
);
322 //fprintf(stderr, "Upload to fb! %d, %d\n", rmesa->rmm->u_list[id].ref_count, id);
324 /*if (rmesa->rmm->u_list[id].fb) {
325 emit_lin_cp(rmesa, rmesa->radeon.radeonScreen->texOffset[0] + rmesa->rmm->u_list[id].fb->ofs,
326 r300GartOffsetFromVirtual(rmesa, rmesa->rmm->u_list[id].ptr),
327 rmesa->rmm->u_list[id].size);
331 cmd
= (drm_r300_cmd_header_t
*)r300AllocCmdBuf(rmesa
, 2 + sizeof(ull
) / 4, __FUNCTION__
);
332 cmd
[0].scratch
.cmd_type
= R300_CMD_SCRATCH
;
333 cmd
[0].scratch
.reg
= RADEON_MM_SCRATCH
;
334 cmd
[0].scratch
.n_bufs
= 1;
335 cmd
[0].scratch
.flags
= 0;
338 ull
= (uint64_t)(intptr_t)&rmesa
->rmm
->u_list
[id
].age
;
339 _mesa_memcpy(cmd
, &ull
, sizeof(ull
));
340 cmd
+= sizeof(ull
) / 4;
344 LOCK_HARDWARE(&rmesa
->radeon
); /* Protect from DRM. */
345 rmesa
->rmm
->u_list
[id
].h_pending
++;
346 UNLOCK_HARDWARE(&rmesa
->radeon
);
349 unsigned long radeon_mm_offset(r300ContextPtr rmesa
, int id
)
351 unsigned long offset
;
353 assert(id
<= rmesa
->rmm
->u_last
);
355 if (rmesa
->rmm
->u_list
[id
].fb
) {
356 offset
= rmesa
->radeon
.radeonScreen
->texOffset
[0] + rmesa
->rmm
->u_list
[id
].fb
->ofs
;
358 offset
= (char *)rmesa
->rmm
->u_list
[id
].ptr
-
359 (char *)rmesa
->radeon
.radeonScreen
->gartTextures
.map
;
360 offset
+= rmesa
->radeon
.radeonScreen
->gart_texture_offset
;
366 int radeon_mm_on_card(r300ContextPtr rmesa
, int id
)
368 assert(id
<= rmesa
->rmm
->u_last
);
370 if (rmesa
->rmm
->u_list
[id
].fb
)
376 void *radeon_mm_map(r300ContextPtr rmesa
, int id
, int access
)
379 fprintf(stderr
, "%s: %d at age %x\n", __FUNCTION__
, id
, radeonGetAge((radeonContextPtr
)rmesa
));
384 assert(id
<= rmesa
->rmm
->u_last
);
386 rmesa
->rmm
->u_list
[id
].ref_count
= 0;
387 if (rmesa
->rmm
->u_list
[id
].fb
) {
388 WARN_ONCE("Mapping fb!\n");
389 /* Idle gart only and do upload on unmap */
390 //rmesa->rmm->u_list[id].fb = NULL;
393 if(rmesa
->rmm
->u_list
[id
].mapped
== 1)
394 WARN_ONCE("buffer %d already mapped\n", id
);
396 rmesa
->rmm
->u_list
[id
].mapped
= 1;
397 ptr
= radeon_mm_ptr(rmesa
, id
);
402 if (access
== RADEON_MM_R
) {
404 if(rmesa
->rmm
->u_list
[id
].mapped
== 1)
405 WARN_ONCE("buffer %d already mapped\n", id
);
407 rmesa
->rmm
->u_list
[id
].mapped
= 1;
408 ptr
= radeon_mm_ptr(rmesa
, id
);
414 if (rmesa
->rmm
->u_list
[id
].h_pending
)
415 r300FlushCmdBuf(rmesa
, __FUNCTION__
);
417 if (rmesa
->rmm
->u_list
[id
].h_pending
) {
421 while(rmesa
->rmm
->u_list
[id
].age
> radeonGetAge((radeonContextPtr
)rmesa
) && tries
++ < 1000)
425 fprintf(stderr
, "Idling failed (%x vs %x)\n",
426 rmesa
->rmm
->u_list
[id
].age
, radeonGetAge((radeonContextPtr
)rmesa
));
430 if(rmesa
->rmm
->u_list
[id
].mapped
== 1)
431 WARN_ONCE("buffer %d already mapped\n", id
);
433 rmesa
->rmm
->u_list
[id
].mapped
= 1;
434 ptr
= radeon_mm_ptr(rmesa
, id
);
439 void radeon_mm_unmap(r300ContextPtr rmesa
, int id
)
442 fprintf(stderr
, "%s: %d at age %x\n", __FUNCTION__
, id
, radeonGetAge((radeonContextPtr
)rmesa
));
445 assert(id
<= rmesa
->rmm
->u_last
);
447 if(rmesa
->rmm
->u_list
[id
].mapped
== 0)
448 WARN_ONCE("buffer %d not mapped\n", id
);
450 rmesa
->rmm
->u_list
[id
].mapped
= 0;
452 if (rmesa
->rmm
->u_list
[id
].fb
)
453 emit_lin_cp(rmesa
, rmesa
->radeon
.radeonScreen
->texOffset
[0] + rmesa
->rmm
->u_list
[id
].fb
->ofs
,
454 r300GartOffsetFromVirtual(rmesa
, rmesa
->rmm
->u_list
[id
].ptr
),
455 rmesa
->rmm
->u_list
[id
].size
);
458 void radeon_mm_free(r300ContextPtr rmesa
, int id
)
461 fprintf(stderr
, "%s: %d at age %x\n", __FUNCTION__
, id
, radeonGetAge((radeonContextPtr
)rmesa
));
464 assert(id
<= rmesa
->rmm
->u_last
);
469 if(rmesa
->rmm
->u_list
[id
].ptr
== NULL
){
470 WARN_ONCE("Not allocated!\n");
474 if(rmesa
->rmm
->u_list
[id
].pending
){
475 WARN_ONCE("%p already pended!\n", rmesa
->rmm
->u_list
[id
].ptr
);
479 rmesa
->rmm
->u_list
[id
].pending
= 1;