1 #ifndef __NOUVEAU_RESOURCE_H__
2 #define __NOUVEAU_RESOURCE_H__
4 #include "util/u_transfer.h"
5 #include "util/u_double_list.h"
/* Heuristic score bounds used when deciding where a buffer should live:
 * frequent CPU maps drive the score down, GPU use drives it up; crossing
 * the VRAM threshold triggers migration of GART buffers into VRAM.
 */
#define NOUVEAU_BUFFER_SCORE_MIN -25000
#define NOUVEAU_BUFFER_SCORE_MAX 25000
#define NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD 20000
/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
 * resource->data has not been updated to reflect modified VRAM contents.
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * without notice (client-owned storage).
 */
/* Bits stored in nv04_resource::status (see comment above for semantics). */
#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 0)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
29 struct nv04_resource
{
30 struct pipe_resource base
;
31 const struct u_resource_vtbl
*vtbl
;
34 struct nouveau_bo
*bo
;
40 int16_t score
; /* low if mapped very often, if high can move to VRAM */
42 struct nouveau_fence
*fence
;
43 struct nouveau_fence
*fence_wr
;
45 struct nouveau_mm_allocation
*mm
;
/* Release the GPU-side storage (bo / mm sub-allocation) of a resource.
 * NOTE(review): return type lost in source corruption; restored as void —
 * confirm against upstream.
 */
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);
52 nouveau_buffer_download(struct pipe_context
*, struct nv04_resource
*,
53 unsigned start
, unsigned size
);
56 nouveau_buffer_migrate(struct pipe_context
*,
57 struct nv04_resource
*, unsigned domain
);
60 nouveau_buffer_adjust_score(struct pipe_context
*pipe
,
61 struct nv04_resource
*res
, int16_t score
)
64 if (res
->score
> NOUVEAU_BUFFER_SCORE_MIN
)
68 if (res
->score
< NOUVEAU_BUFFER_SCORE_MAX
)
70 if (res
->domain
== NOUVEAU_BO_GART
&&
71 res
->score
> NOUVEAU_BUFFER_SCORE_VRAM_THRESHOLD
)
72 nouveau_buffer_migrate(pipe
, res
, NOUVEAU_BO_VRAM
);
76 /* XXX: wait for fence (atm only using this for vertex push) */
78 nouveau_resource_map_offset(struct pipe_context
*pipe
,
79 struct nv04_resource
*res
, uint32_t offset
,
84 nouveau_buffer_adjust_score(pipe
, res
, -250);
86 if ((res
->domain
== NOUVEAU_BO_VRAM
) &&
87 (res
->status
& NOUVEAU_BUFFER_STATUS_DIRTY
))
88 nouveau_buffer_download(pipe
, res
, 0, res
->base
.width0
);
90 if ((res
->domain
!= NOUVEAU_BO_GART
) ||
91 (res
->status
& NOUVEAU_BUFFER_STATUS_USER_MEMORY
))
92 return res
->data
+ offset
;
95 flags
|= NOUVEAU_BO_NOSYNC
;
97 if (nouveau_bo_map_range(res
->bo
, res
->offset
+ offset
,
98 res
->base
.width0
, flags
))
102 nouveau_bo_unmap(res
->bo
);
107 nouveau_resource_unmap(struct nv04_resource
*res
)
112 static INLINE
struct nv04_resource
*
113 nv04_resource(struct pipe_resource
*resource
)
115 return (struct nv04_resource
*)resource
;
118 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
119 static INLINE boolean
120 nouveau_resource_mapped_by_gpu(struct pipe_resource
*resource
)
122 return nv04_resource(resource
)->domain
!= 0;
/* Create a regular (driver-managed) buffer resource from a template. */
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);
/* Wrap client-owned memory in a buffer resource
 * (marked NOUVEAU_BUFFER_STATUS_USER_MEMORY).
 */
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);
134 nouveau_user_buffer_upload(struct nv04_resource
*, unsigned base
,