nv50: move onto common linear buffer manager
[mesa.git] / src / gallium / drivers / nouveau / nouveau_buffer.h
#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_bo;

#define NOUVEAU_BUFFER_SCORE_MIN -25000
#define NOUVEAU_BUFFER_SCORE_MAX  25000
#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000
/* DIRTY: buffer was (or will be after the next flush) written to by the GPU
 * and resource->data has not been updated to reflect the modified VRAM
 * contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 0)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
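
/* Hedged sketch, not part of this header: a CPU-access path might consult
 * these flags roughly as follows (pipe and res are hypothetical locals):
 *
 *   if (res->status & NOUVEAU_BUFFER_STATUS_DIRTY)
 *      nouveau_buffer_download(pipe, res, 0, res->base.width0);
 *   if (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)
 *      return res->data; // client memory, nothing to download or map
 */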

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   int16_t score; /* low if mapped often by the CPU; a high score allows migration to VRAM */

   struct nouveau_fence *fence;    /* last fence referencing this buffer */
   struct nouveau_fence *fence_wr; /* last fence that wrote to this buffer */

   struct nouveau_mm_allocation *mm;
};
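
/* Illustrative helper, not in this file: assuming nouveau_bo::offset holds
 * the bo's virtual address once validated, the GPU address of a resource
 * follows from the invariants stated above:
 *
 *   static INLINE uint64_t
 *   nv04_resource_gpu_address(struct nv04_resource *res)
 *   {
 *      return res->bo->offset + res->offset;
 *   }
 */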

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

boolean
nouveau_buffer_download(struct pipe_context *, struct nv04_resource *,
                        unsigned start, unsigned size);

boolean
nouveau_buffer_migrate(struct pipe_context *,
                       struct nv04_resource *, unsigned domain);

static INLINE void
nouveau_buffer_adjust_score(struct pipe_context *pipe,
                            struct nv04_resource *res, int16_t score)
{
   if (score < 0) {
      if (res->score > NOUVEAU_BUFFER_SCORE_MIN)
         res->score += score;
   } else
   if (score > 0) {
      if (res->score < NOUVEAU_BUFFER_SCORE_MAX)
         res->score += score;
      if (res->domain == NOUVEAU_BO_GART &&
          res->score > NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD)
         nouveau_buffer_migrate(pipe, res, NOUVEAU_BO_VRAM);
   }
}
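
/* Intended use, sketched (the -250 matches the map path below; the +100 on
 * GPU use is a made-up value): CPU maps push the score down, GPU references
 * push it up, until a GART buffer crosses the threshold and migrates:
 *
 *   nouveau_buffer_adjust_score(pipe, res, -250); // on CPU map
 *   nouveau_buffer_adjust_score(pipe, res, +100); // on GPU use, e.g. draw
 */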

/* XXX: wait for fence (atm only using this for vertex push) */
static INLINE void *
nouveau_resource_map_offset(struct pipe_context *pipe,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   void *map;

   nouveau_buffer_adjust_score(pipe, res, -250);

   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      nouveau_buffer_download(pipe, res, 0, res->base.width0);

   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0, flags))
      return NULL;

   map = res->bo->map;
   /* the pointer stays valid after unmap, libdrm keeps the bo's map cached */
   nouveau_bo_unmap(res->bo);
   return map;
}

static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
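
/* Typical pairing, illustrative only (dst is a hypothetical destination):
 *
 *   uint8_t *map = nouveau_resource_map_offset(pipe, res, 0, NOUVEAU_BO_RD);
 *   if (map)
 *      memcpy(dst, map, res->base.width0);
 *   nouveau_resource_unmap(res); // no-op, see above
 */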

static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* is the resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

boolean
nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
                           unsigned size);
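
/* Illustrative creation path with made-up size and bind flags: wrap client
 * memory without copying, then push a range into GPU-accessible storage:
 *
 *   struct pipe_resource *buf =
 *      nouveau_user_buffer_create(pscreen, ptr, 256,
 *                                 PIPE_BIND_VERTEX_BUFFER);
 *   nouveau_user_buffer_upload(nv04_resource(buf), 0, 256);
 */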

#endif