/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "nouveau_drmif.h"
#include "nouveau_dma.h"
#include "nouveau_local.h"
33 nouveau_mem_free(struct nouveau_device
*dev
, struct drm_nouveau_mem_alloc
*ma
,
36 struct nouveau_device_priv
*nvdev
= nouveau_device(dev
);
37 struct drm_nouveau_mem_free mf
;
40 drmUnmap(*map
, ma
->size
);
45 mf
.offset
= ma
->offset
;
47 drmCommandWrite(nvdev
->fd
, DRM_NOUVEAU_MEM_FREE
,
54 nouveau_mem_alloc(struct nouveau_device
*dev
, unsigned size
, unsigned align
,
55 uint32_t flags
, struct drm_nouveau_mem_alloc
*ma
, void **map
)
57 struct nouveau_device_priv
*nvdev
= nouveau_device(dev
);
60 ma
->alignment
= align
;
64 ma
->flags
|= NOUVEAU_MEM_MAPPED
;
65 ret
= drmCommandWriteRead(nvdev
->fd
, DRM_NOUVEAU_MEM_ALLOC
, ma
,
66 sizeof(struct drm_nouveau_mem_alloc
));
71 ret
= drmMap(nvdev
->fd
, ma
->map_handle
, ma
->size
, map
);
74 nouveau_mem_free(dev
, ma
, map
);
83 nouveau_bo_tmp_del(void *priv
)
85 struct nouveau_resource
*r
= priv
;
87 nouveau_fence_ref(NULL
, (struct nouveau_fence
**)&r
->priv
);
88 nouveau_resource_free(&r
);
92 nouveau_bo_tmp_max(struct nouveau_device_priv
*nvdev
)
94 struct nouveau_resource
*r
= nvdev
->sa_heap
;
98 if (r
->in_use
&& !nouveau_fence(r
->priv
)->emitted
) {
111 static struct nouveau_resource
*
112 nouveau_bo_tmp(struct nouveau_channel
*chan
, unsigned size
,
113 struct nouveau_fence
*fence
)
115 struct nouveau_device_priv
*nvdev
= nouveau_device(chan
->device
);
116 struct nouveau_resource
*r
= NULL
;
117 struct nouveau_fence
*ref
= NULL
;
120 nouveau_fence_ref(fence
, &ref
);
122 nouveau_fence_new(chan
, &ref
);
125 while (nouveau_resource_alloc(nvdev
->sa_heap
, size
, ref
, &r
)) {
126 if (nouveau_bo_tmp_max(nvdev
) < size
) {
127 nouveau_fence_ref(NULL
, &ref
);
131 nouveau_fence_flush(chan
);
133 nouveau_fence_signal_cb(ref
, nouveau_bo_tmp_del
, r
);
139 nouveau_bo_init(struct nouveau_device
*dev
)
141 struct nouveau_device_priv
*nvdev
= nouveau_device(dev
);
144 ret
= nouveau_mem_alloc(dev
, 128*1024, 0, NOUVEAU_MEM_AGP
|
145 NOUVEAU_MEM_PCI
, &nvdev
->sa
, &nvdev
->sa_map
);
149 ret
= nouveau_resource_init(&nvdev
->sa_heap
, 0, nvdev
->sa
.size
);
151 nouveau_mem_free(dev
, &nvdev
->sa
, &nvdev
->sa_map
);
159 nouveau_bo_takedown(struct nouveau_device
*dev
)
161 struct nouveau_device_priv
*nvdev
= nouveau_device(dev
);
163 nouveau_mem_free(dev
, &nvdev
->sa
, &nvdev
->sa_map
);
167 nouveau_bo_new(struct nouveau_device
*dev
, uint32_t flags
, int align
,
168 int size
, struct nouveau_bo
**bo
)
170 struct nouveau_bo_priv
*nvbo
;
173 if (!dev
|| !bo
|| *bo
)
176 nvbo
= calloc(1, sizeof(struct nouveau_bo_priv
));
179 nvbo
->base
.device
= dev
;
180 nvbo
->base
.size
= size
;
181 nvbo
->base
.handle
= bo_to_ptr(nvbo
);
182 nvbo
->drm
.alignment
= align
;
185 if (flags
& NOUVEAU_BO_TILED
) {
187 if (flags
& NOUVEAU_BO_ZTILE
)
189 flags
&= ~NOUVEAU_BO_TILED
;
192 ret
= nouveau_bo_set_status(&nvbo
->base
, flags
);
203 nouveau_bo_user(struct nouveau_device
*dev
, void *ptr
, int size
,
204 struct nouveau_bo
**bo
)
206 struct nouveau_bo_priv
*nvbo
;
208 if (!dev
|| !bo
|| *bo
)
211 nvbo
= calloc(1, sizeof(*nvbo
));
214 nvbo
->base
.device
= dev
;
219 nvbo
->base
.size
= size
;
220 nvbo
->base
.offset
= nvbo
->drm
.offset
;
221 nvbo
->base
.handle
= bo_to_ptr(nvbo
);
228 nouveau_bo_ref(struct nouveau_device
*dev
, uint64_t handle
,
229 struct nouveau_bo
**bo
)
231 struct nouveau_bo_priv
*nvbo
= ptr_to_bo(handle
);
233 if (!dev
|| !bo
|| *bo
)
242 nouveau_bo_del_cb(void *priv
)
244 struct nouveau_bo_priv
*nvbo
= priv
;
246 nouveau_fence_ref(NULL
, &nvbo
->fence
);
247 nouveau_mem_free(nvbo
->base
.device
, &nvbo
->drm
, &nvbo
->map
);
248 if (nvbo
->sysmem
&& !nvbo
->user
)
254 nouveau_bo_del(struct nouveau_bo
**bo
)
256 struct nouveau_bo_priv
*nvbo
;
260 nvbo
= nouveau_bo(*bo
);
263 if (--nvbo
->refcount
)
267 nouveau_pushbuf_flush(nvbo
->pending
->channel
, 0);
270 nouveau_fence_signal_cb(nvbo
->fence
, nouveau_bo_del_cb
, nvbo
);
272 nouveau_bo_del_cb(nvbo
);
276 nouveau_bo_busy(struct nouveau_bo
*bo
, uint32_t flags
)
278 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
279 struct nouveau_fence
*fence
;
284 /* If the buffer is pending it must be busy, unless
285 * both are RD, in which case we can allow access */
287 if ((nvbo
->pending
->flags
& NOUVEAU_BO_RDWR
) == NOUVEAU_BO_RD
&&
288 (flags
& NOUVEAU_BO_RDWR
) == NOUVEAU_BO_RD
)
294 if (flags
& NOUVEAU_BO_WR
)
297 fence
= nvbo
->wr_fence
;
299 /* If the buffer is not pending and doesn't have a fence
300 * that conflicts with our flags then it can't be busy
305 /* If the fence is signalled the buffer is not busy, else is busy */
306 return !nouveau_fence(fence
)->signalled
;
310 nouveau_bo_map(struct nouveau_bo
*bo
, uint32_t flags
)
312 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
318 (nvbo
->pending
->flags
& NOUVEAU_BO_WR
|| flags
& NOUVEAU_BO_WR
)) {
319 nouveau_pushbuf_flush(nvbo
->pending
->channel
, 0);
322 if (flags
& NOUVEAU_BO_WR
)
323 nouveau_fence_wait(&nvbo
->fence
);
325 nouveau_fence_wait(&nvbo
->wr_fence
);
328 bo
->map
= nvbo
->sysmem
;
335 nouveau_bo_unmap(struct nouveau_bo
*bo
)
341 nouveau_bo_upload(struct nouveau_bo_priv
*nvbo
)
344 nouveau_fence_wait(&nvbo
->fence
);
345 memcpy(nvbo
->map
, nvbo
->sysmem
, nvbo
->drm
.size
);
350 nouveau_bo_set_status(struct nouveau_bo
*bo
, uint32_t flags
)
352 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
353 struct drm_nouveau_mem_alloc
new;
354 void *new_map
= NULL
, *new_sysmem
= NULL
;
355 unsigned new_flags
= 0, ret
;
359 /* Check current memtype vs requested, if they match do nothing */
360 if ((nvbo
->drm
.flags
& NOUVEAU_MEM_FB
) && (flags
& NOUVEAU_BO_VRAM
))
362 if ((nvbo
->drm
.flags
& (NOUVEAU_MEM_AGP
| NOUVEAU_MEM_PCI
)) &&
363 (flags
& NOUVEAU_BO_GART
))
365 if (nvbo
->drm
.size
== 0 && nvbo
->sysmem
&& (flags
& NOUVEAU_BO_LOCAL
))
368 memset(&new, 0x00, sizeof(new));
370 /* Allocate new memory */
371 if (flags
& NOUVEAU_BO_VRAM
)
372 new_flags
|= NOUVEAU_MEM_FB
;
374 if (flags
& NOUVEAU_BO_GART
)
375 new_flags
|= (NOUVEAU_MEM_AGP
| NOUVEAU_MEM_PCI
);
377 if (nvbo
->tiled
&& flags
) {
378 new_flags
|= NOUVEAU_MEM_TILE
;
380 new_flags
|= NOUVEAU_MEM_TILE_ZETA
;
384 ret
= nouveau_mem_alloc(bo
->device
, bo
->size
,
385 nvbo
->drm
.alignment
, new_flags
,
391 new_sysmem
= malloc(bo
->size
);
394 /* Copy old -> new */
396 if (nvbo
->sysmem
|| nvbo
->map
) {
397 struct nouveau_pushbuf_bo
*pbo
= nvbo
->pending
;
398 nvbo
->pending
= NULL
;
399 nouveau_bo_map(bo
, NOUVEAU_BO_RD
);
400 memcpy(new_map
, bo
->map
, bo
->size
);
401 nouveau_bo_unmap(bo
);
405 /* Free old memory */
407 nouveau_fence_wait(&nvbo
->fence
);
408 nouveau_mem_free(bo
->device
, &nvbo
->drm
, &nvbo
->map
);
409 if (nvbo
->sysmem
&& !nvbo
->user
)
415 nvbo
->sysmem
= new_sysmem
;
417 bo
->offset
= nvbo
->drm
.offset
;
422 nouveau_bo_validate_user(struct nouveau_channel
*chan
, struct nouveau_bo
*bo
,
423 struct nouveau_fence
*fence
, uint32_t flags
)
425 struct nouveau_channel_priv
*nvchan
= nouveau_channel(chan
);
426 struct nouveau_device_priv
*nvdev
= nouveau_device(chan
->device
);
427 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
428 struct nouveau_resource
*r
;
430 if (nvchan
->user_charge
+ bo
->size
> nvdev
->sa
.size
)
433 if (!(flags
& NOUVEAU_BO_GART
))
436 r
= nouveau_bo_tmp(chan
, bo
->size
, fence
);
439 nvchan
->user_charge
+= bo
->size
;
441 memcpy(nvdev
->sa_map
+ r
->start
, nvbo
->sysmem
, bo
->size
);
443 nvbo
->offset
= nvdev
->sa
.offset
+ r
->start
;
444 nvbo
->flags
= NOUVEAU_BO_GART
;
449 nouveau_bo_validate_bo(struct nouveau_channel
*chan
, struct nouveau_bo
*bo
,
450 struct nouveau_fence
*fence
, uint32_t flags
)
452 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
455 ret
= nouveau_bo_set_status(bo
, flags
);
457 nouveau_fence_flush(chan
);
459 ret
= nouveau_bo_set_status(bo
, flags
);
465 nouveau_bo_upload(nvbo
);
467 nvbo
->offset
= nvbo
->drm
.offset
;
468 if (nvbo
->drm
.flags
& (NOUVEAU_MEM_AGP
| NOUVEAU_MEM_PCI
))
469 nvbo
->flags
= NOUVEAU_BO_GART
;
471 nvbo
->flags
= NOUVEAU_BO_VRAM
;
477 nouveau_bo_validate(struct nouveau_channel
*chan
, struct nouveau_bo
*bo
,
480 struct nouveau_bo_priv
*nvbo
= nouveau_bo(bo
);
481 struct nouveau_fence
*fence
= nouveau_pushbuf(chan
->pushbuf
)->fence
;
484 assert(bo
->map
== NULL
);
487 ret
= nouveau_bo_validate_user(chan
, bo
, fence
, flags
);
489 ret
= nouveau_bo_validate_bo(chan
, bo
, fence
, flags
);
494 ret
= nouveau_bo_validate_bo(chan
, bo
, fence
, flags
);
499 if (flags
& NOUVEAU_BO_WR
)
500 nouveau_fence_ref(fence
, &nvbo
->wr_fence
);
501 nouveau_fence_ref(fence
, &nvbo
->fence
);