nouveau: track last validated offsets, so we know when relocs can be avoided.
[mesa.git] / src / mesa / drivers / dri / nouveau_winsys / nouveau_bo.c
1 /*
2 * Copyright 2007 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_drmif.h"
#include "nouveau_dma.h"
#include "nouveau_local.h"
31
32 static void
33 nouveau_mem_free(struct nouveau_device *dev, struct drm_nouveau_mem_alloc *ma,
34 void **map)
35 {
36 struct nouveau_device_priv *nvdev = nouveau_device(dev);
37 struct drm_nouveau_mem_free mf;
38
39 if (map && *map) {
40 drmUnmap(*map, ma->size);
41 *map = NULL;
42 }
43
44 if (ma->size) {
45 mf.offset = ma->offset;
46 mf.flags = ma->flags;
47 drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE,
48 &mf, sizeof(mf));
49 ma->size = 0;
50 }
51 }
52
53 static int
54 nouveau_mem_alloc(struct nouveau_device *dev, unsigned size, unsigned align,
55 uint32_t flags, struct drm_nouveau_mem_alloc *ma, void **map)
56 {
57 struct nouveau_device_priv *nvdev = nouveau_device(dev);
58 int ret;
59
60 ma->alignment = align;
61 ma->size = size;
62 ma->flags = flags;
63 if (map)
64 ma->flags |= NOUVEAU_MEM_MAPPED;
65 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC, ma,
66 sizeof(struct drm_nouveau_mem_alloc));
67 if (ret)
68 return ret;
69
70 if (map) {
71 ret = drmMap(nvdev->fd, ma->map_handle, ma->size, map);
72 if (ret) {
73 *map = NULL;
74 nouveau_mem_free(dev, ma, map);
75 return ret;
76 }
77 }
78
79 return 0;
80 }
81
82 static int
83 nouveau_bo_realloc_gpu(struct nouveau_bo_priv *nvbo, uint32_t flags, int size)
84 {
85 int ret;
86
87 if (nvbo->drm.size && nvbo->drm.size != size) {
88 nouveau_mem_free(nvbo->base.device, &nvbo->drm, &nvbo->map);
89 }
90
91 if (size && !nvbo->drm.size) {
92 if (flags) {
93 nvbo->drm.flags = 0;
94 if (flags & NOUVEAU_BO_VRAM)
95 nvbo->drm.flags |= NOUVEAU_MEM_FB;
96 if (flags & NOUVEAU_BO_GART)
97 nvbo->drm.flags |= (NOUVEAU_MEM_AGP |
98 NOUVEAU_MEM_PCI);
99 nvbo->drm.flags |= NOUVEAU_MEM_MAPPED;
100 }
101
102 ret = nouveau_mem_alloc(nvbo->base.device, size,
103 nvbo->drm.alignment, nvbo->drm.flags,
104 &nvbo->drm, &nvbo->map);
105 if (ret) {
106 assert(0);
107 }
108 }
109
110 return 0;
111 }
112
113 static void
114 nouveau_bo_tmp_del(void *priv)
115 {
116 struct nouveau_resource *r = priv;
117
118 nouveau_fence_ref(NULL, (struct nouveau_fence **)&r->priv);
119 nouveau_resource_free(&r);
120 }
121
122 static struct nouveau_resource *
123 nouveau_bo_tmp(struct nouveau_channel *chan, unsigned size,
124 struct nouveau_fence *fence)
125 {
126 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
127 struct nouveau_resource *r = NULL;
128 struct nouveau_fence *ref = NULL;
129
130 if (fence)
131 nouveau_fence_ref(fence, &ref);
132 else
133 nouveau_fence_new(chan, &ref);
134 assert(ref);
135
136 while (nouveau_resource_alloc(nvdev->sa_heap, size, ref, &r)) {
137 nouveau_fence_flush(chan);
138 }
139 nouveau_fence_signal_cb(ref, nouveau_bo_tmp_del, r);
140
141 return r;
142 }
143
144 int
145 nouveau_bo_init(struct nouveau_device *dev)
146 {
147 struct nouveau_device_priv *nvdev = nouveau_device(dev);
148 int ret;
149
150 ret = nouveau_mem_alloc(dev, 128*1024, 0, NOUVEAU_MEM_AGP |
151 NOUVEAU_MEM_PCI, &nvdev->sa, &nvdev->sa_map);
152 if (ret)
153 return ret;
154
155 ret = nouveau_resource_init(&nvdev->sa_heap, 0, nvdev->sa.size);
156 if (ret) {
157 nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
158 return ret;
159 }
160
161 return 0;
162 }
163
164 void
165 nouveau_bo_takedown(struct nouveau_device *dev)
166 {
167 struct nouveau_device_priv *nvdev = nouveau_device(dev);
168
169 nouveau_mem_free(dev, &nvdev->sa, &nvdev->sa_map);
170 }
171
172 int
173 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
174 int size, struct nouveau_bo **bo)
175 {
176 struct nouveau_bo_priv *nvbo;
177 int ret;
178
179 if (!dev || !bo || *bo)
180 return -EINVAL;
181
182 nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
183 if (!nvbo)
184 return -ENOMEM;
185 nvbo->base.device = dev;
186 nvbo->drm.alignment = align;
187
188 if (flags & NOUVEAU_BO_PIN) {
189 ret = nouveau_bo_realloc_gpu(nvbo, flags, size);
190 if (ret) {
191 free(nvbo);
192 return ret;
193 }
194 } else {
195 nvbo->sysmem = malloc(size);
196 if (!nvbo->sysmem) {
197 free(nvbo);
198 return -ENOMEM;
199 }
200 }
201
202 nvbo->base.size = size;
203 nvbo->base.offset = nvbo->drm.offset;
204 nvbo->base.handle = bo_to_ptr(nvbo);
205 nvbo->refcount = 1;
206 *bo = &nvbo->base;
207 return 0;
208 }
209
210 int
211 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
212 struct nouveau_bo **bo)
213 {
214 struct nouveau_bo_priv *nvbo;
215
216 if (!dev || !bo || *bo)
217 return -EINVAL;
218
219 nvbo = calloc(1, sizeof(*nvbo));
220 if (!nvbo)
221 return -ENOMEM;
222 nvbo->base.device = dev;
223
224 nvbo->sysmem = ptr;
225 nvbo->user = 1;
226
227 nvbo->base.size = size;
228 nvbo->base.offset = nvbo->drm.offset;
229 nvbo->base.handle = bo_to_ptr(nvbo);
230 nvbo->refcount = 1;
231 *bo = &nvbo->base;
232 return 0;
233 }
234
235 int
236 nouveau_bo_ref(struct nouveau_device *dev, uint64_t handle,
237 struct nouveau_bo **bo)
238 {
239 struct nouveau_bo_priv *nvbo = ptr_to_bo(handle);
240
241 if (!dev || !bo || *bo)
242 return -EINVAL;
243
244 nvbo->refcount++;
245 *bo = &nvbo->base;
246 return 0;
247 }
248
249 int
250 nouveau_bo_resize(struct nouveau_bo *bo, int size)
251 {
252 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
253 int ret;
254
255 if (!nvbo || nvbo->user)
256 return -EINVAL;
257
258 if (nvbo->sysmem) {
259 nvbo->sysmem = realloc(nvbo->sysmem, size);
260 if (!nvbo->sysmem)
261 return -ENOMEM;
262 } else {
263 ret = nouveau_bo_realloc_gpu(nvbo, 0, size);
264 if (ret)
265 return ret;
266 }
267
268 nvbo->base.size = size;
269 return 0;
270 }
271
272 void
273 nouveau_bo_del(struct nouveau_bo **bo)
274 {
275 struct nouveau_bo_priv *nvbo;
276
277 if (!bo || !*bo)
278 return;
279 nvbo = nouveau_bo(*bo);
280 *bo = NULL;
281
282 if (--nvbo->refcount)
283 return;
284
285 if (nvbo->fence)
286 nouveau_fence_wait(&nvbo->fence);
287
288 nouveau_bo_realloc_gpu(nvbo, 0, 0);
289 if (nvbo->sysmem && !nvbo->user)
290 free(nvbo->sysmem);
291 free(nvbo);
292 }
293
294 int
295 nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
296 {
297 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
298
299 if (!nvbo)
300 return -EINVAL;
301
302 if (flags & NOUVEAU_BO_WR)
303 nouveau_fence_wait(&nvbo->fence);
304 else
305 nouveau_fence_wait(&nvbo->wr_fence);
306
307 if (nvbo->sysmem)
308 bo->map = nvbo->sysmem;
309 else
310 bo->map = nvbo->map;
311 return 0;
312 }
313
314 void
315 nouveau_bo_unmap(struct nouveau_bo *bo)
316 {
317 bo->map = NULL;
318 }
319
320 static int
321 nouveau_bo_upload(struct nouveau_bo_priv *nvbo)
322 {
323 if (nvbo->fence)
324 nouveau_fence_wait(&nvbo->fence);
325 memcpy(nvbo->map, nvbo->sysmem, nvbo->drm.size);
326 return 0;
327 }
328
329 static int
330 nouveau_bo_validate_user(struct nouveau_channel *chan, struct nouveau_bo *bo,
331 struct nouveau_fence *fence, uint32_t flags)
332 {
333 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
334 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
335 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
336 struct nouveau_resource *r;
337
338 if (nvchan->user_charge + bo->size > nvdev->sa.size)
339 return 1;
340 nvchan->user_charge += bo->size;
341
342 if (!(flags & NOUVEAU_BO_GART))
343 return 1;
344
345 r = nouveau_bo_tmp(chan, bo->size, fence);
346 if (!r)
347 return 1;
348
349 memcpy(nvdev->sa_map + r->start, nvbo->sysmem, bo->size);
350
351 nvbo->offset = nvdev->sa.offset + r->start;
352 nvbo->flags = NOUVEAU_BO_GART;
353 return 0;
354 }
355
356 static int
357 nouveau_bo_validate_bo(struct nouveau_channel *chan, struct nouveau_bo *bo,
358 struct nouveau_fence *fence, uint32_t flags)
359 {
360 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
361
362 if (!nvbo->drm.size) {
363 nouveau_bo_realloc_gpu(nvbo, flags, nvbo->base.size);
364 nouveau_bo_upload(nvbo);
365 if (!nvbo->user) {
366 free(nvbo->sysmem);
367 nvbo->sysmem = NULL;
368 }
369 } else
370 if (nvbo->user) {
371 nouveau_bo_upload(nvbo);
372 }
373
374 nvbo->offset = nvbo->drm.offset;
375 if (nvbo->drm.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
376 nvbo->flags = NOUVEAU_BO_GART;
377 else
378 nvbo->flags = NOUVEAU_BO_VRAM;
379
380 return 0;
381 }
382
383 int
384 nouveau_bo_validate(struct nouveau_channel *chan, struct nouveau_bo *bo,
385 struct nouveau_fence *fence, uint32_t flags)
386 {
387 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
388 int ret;
389
390 assert(bo->map == NULL);
391
392 if (nvbo->user) {
393 ret = nouveau_bo_validate_user(chan, bo, fence, flags);
394 if (ret) {
395 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
396 if (ret)
397 return ret;
398 }
399 } else {
400 ret = nouveau_bo_validate_bo(chan, bo, fence, flags);
401 if (ret)
402 return ret;
403 }
404
405 if (flags & NOUVEAU_BO_WR)
406 nouveau_fence_ref(fence, &nvbo->wr_fence);
407 nouveau_fence_ref(fence, &nvbo->fence);
408 return 0;
409 }
410