nv50: sync textures with render targets ourselves
[mesa.git] / src / gallium / drivers / nouveau / nouveau_buffer.h
#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_bo;

#define NOUVEAU_BUFFER_SCORE_MIN -25000
#define NOUVEAU_BUFFER_SCORE_MAX  25000
#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000

/* GPU_WRITING: buffer was (or will be after the next flush) written to by
 * the GPU and resource->data has not been updated to reflect the modified
 * VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

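/* Illustration only (caller code, not part of this header): a GPU write
 * makes the resource->data shadow stale, so such writes get flagged, e.g.
 *
 *    res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
 *
 * and nouveau_resource_map_offset() below downloads the VRAM contents
 * before returning a CPU pointer while this bit is set.
 */
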
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   int16_t score; /* lowered when mapped often; if high, can move to VRAM */

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;
};
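
/* Sketch of the validation-list rule above; nv50_bufctx_add_resident() and
 * the NV50_BUFCTX_VERTEX slot are nv50 driver names assumed here, not
 * declared in this header:
 *
 *    struct nv04_resource *res = nv04_resource(pres);
 *    if (nouveau_resource_mapped_by_gpu(pres))
 *       nv50_bufctx_add_resident(nv50, NV50_BUFCTX_VERTEX, res,
 *                                res->domain | NOUVEAU_BO_RD);
 */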

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

boolean
nouveau_buffer_download(struct pipe_context *, struct nv04_resource *,
                        unsigned start, unsigned size);

boolean
nouveau_buffer_migrate(struct pipe_context *,
                       struct nv04_resource *, unsigned domain);

static INLINE void
nouveau_buffer_adjust_score(struct pipe_context *pipe,
                            struct nv04_resource *res, int16_t score)
{
   if (score < 0) {
      if (res->score > NOUVEAU_BUFFER_SCORE_MIN)
         res->score += score;
   } else
   if (score > 0) {
      if (res->score < NOUVEAU_BUFFER_SCORE_MAX)
         res->score += score;
      if (res->domain == NOUVEAU_BO_GART &&
          res->score > NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD)
         nouveau_buffer_migrate(pipe, res, NOUVEAU_BO_VRAM);
   }
}
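
/* Illustration only: CPU maps lower the score (see the -250 in
 * nouveau_resource_map_offset() below), while GPU usage would raise it
 * with a positive delta (the +100 here is a made-up value). A GART buffer
 * used by the GPU often enough eventually crosses
 * NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD and is migrated to VRAM:
 *
 *    nouveau_buffer_adjust_score(pipe, res, +100);
 */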

/* XXX: wait for fence (atm only using this for vertex push) */
static INLINE void *
nouveau_resource_map_offset(struct pipe_context *pipe,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   void *map;

   nouveau_buffer_adjust_score(pipe, res, -250);

   /* If the GPU wrote to VRAM, res->data is stale; download first. */
   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      nouveau_buffer_download(pipe, res, 0, res->base.width0);

   /* Non-GART buffers (VRAM was just downloaded above) and user memory
    * are accessed through the res->data copy.
    */
   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0, flags))
      return NULL;

   /* The CPU mapping stays usable after dropping the map reference,
    * which is why nouveau_resource_unmap() below is a no-op.
    */
   map = res->bo->map;
   nouveau_bo_unmap(res->bo);
   return map;
}
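
/* Illustrative caller (emit_vertices() and the offset/count values are
 * hypothetical), matching the vertex-push use mentioned in the XXX above:
 *
 *    const uint8_t *vtx =
 *       nouveau_resource_map_offset(pipe, res, vb->buffer_offset,
 *                                   NOUVEAU_BO_RD);
 *    if (vtx) {
 *       emit_vertices(vtx, count);
 *       nouveau_resource_unmap(res);
 *    }
 */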

static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}

static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

boolean
nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
                           unsigned size);
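
/* Illustrative flow (made-up size and bind flag): wrap client memory in a
 * user buffer, then push a range of it into GPU-accessible storage:
 *
 *    struct pipe_resource *pres =
 *       nouveau_user_buffer_create(pscreen, user_ptr, 4096,
 *                                  PIPE_BIND_VERTEX_BUFFER);
 *    nouveau_user_buffer_upload(nv04_resource(pres), 0, 4096);
 */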

#endif