1 #ifndef NVFX_RESOURCE_H
2 #define NVFX_RESOURCE_H
4 #include "util/u_transfer.h"
5 #include "util/u_format.h"
6 #include "util/u_math.h"
7 #include "util/u_double_list.h"
8 #include "util/u_surfaces.h"
9 #include "util/u_dirty_surfaces.h"
10 #include <nouveau/nouveau_bo.h>
/* Common base of every nvfx GPU resource (miptrees and buffers):
 * a gallium pipe_resource plus the nouveau buffer object backing it. */
struct nvfx_resource {
	struct pipe_resource base;	/* gallium base resource */
	struct nouveau_bo *bo;		/* backing nouveau buffer object */
};
21 struct nvfx_resource
*nvfx_resource(struct pipe_resource
*resource
)
23 return (struct nvfx_resource
*)resource
;
26 #define NVFX_RESOURCE_FLAG_USER (NOUVEAU_RESOURCE_FLAG_DRV_PRIV << 0)
28 /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
30 nvfx_resource_mapped_by_gpu(struct pipe_resource
*resource
)
32 return nvfx_resource(resource
)->bo
->handle
;
35 /* is resource in VRAM? */
37 nvfx_resource_on_gpu(struct pipe_resource
* pr
)
40 // a compiler error here means you need to apply libdrm-nouveau-add-domain.patch to libdrm
41 // TODO: return FALSE if not VRAM and on a PCI-E system
42 return ((struct nvfx_resource
*)pr
)->bo
->domain
& (NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
);
48 #define NVFX_MAX_TEXTURE_LEVELS 16
/* We have the following invariants for render temporaries
 *
 * 1. Render temporaries are always linear
 * 2. Render temporaries are always up to date
 * 3. Currently, render temporaries are destroyed when the resource is used for sampling, but kept for any other use
 *
 * Also, we do NOT flush temporaries on any pipe->flush().
 * This is fine, as long as scanout targets and shared resources never need temps.
 *
 * TODO: we may want to also support swizzled temporaries to improve performance in some cases.
 */
struct nvfx_miptree {
	struct nvfx_resource base;

	unsigned linear_pitch; /* for linear textures, 0 for swizzled and compressed textures with level-dependent minimal pitch */
	unsigned face_size; /* 128-byte aligned face/total size */
	/* per-level byte offset within one face (see nvfx_subresource_offset) */
	unsigned level_offset[NVFX_MAX_TEXTURE_LEVELS];

	struct util_surfaces surfaces;
	/* surfaces with pending work; returned by nvfx_surface_get_dirty_surfaces() */
	struct util_dirty_surfaces dirty_surfaces;
};
/* NOTE(review): the struct tag is not visible in this chunk and some field
 * lines appear to have been dropped between "base" and "temp"; the tag
 * "nvfx_surface" matches the nvfx_surface_* API declared below — confirm
 * against the full file. */
struct nvfx_surface {
	struct util_dirty_surface base;

	/* render temporary backing this surface; presumably NULL when none
	 * (temporaries are destroyed on sampling — see invariants above) */
	struct nvfx_miptree *temp;
};
81 static INLINE
struct nouveau_bo
*
82 nvfx_surface_buffer(struct pipe_surface
*surf
)
84 struct nvfx_resource
*mt
= nvfx_resource(surf
->texture
);
89 static INLINE
struct util_dirty_surfaces
*
90 nvfx_surface_get_dirty_surfaces(struct pipe_surface
* surf
)
92 struct nvfx_miptree
*mt
= (struct nvfx_miptree
*)surf
->texture
;
93 return &mt
->dirty_surfaces
;
/* Install resource-related entry points on a context.
 *
 * Fix: restored the missing "void" return types — the visible
 * declarations had none (implicit int is invalid C99). */
void
nvfx_init_resource_functions(struct pipe_context *pipe);

/* Install resource-related entry points on a screen. */
void
nvfx_screen_init_resource_functions(struct pipe_screen *pscreen);
/* Internal miptree API.
 *
 * Fix: restored the missing "void" return types on
 * nvfx_miptree_destroy and nvfx_miptree_surface_del. */

struct pipe_resource *
nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_resource *pt);

void
nvfx_miptree_destroy(struct pipe_screen *pscreen,
                     struct pipe_resource *presource);

struct pipe_resource *
nvfx_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle);

void
nvfx_miptree_surface_del(struct pipe_context *pipe, struct pipe_surface *ps);

struct pipe_surface *
nvfx_miptree_surface_new(struct pipe_context *pipe, struct pipe_resource *pt,
                         const struct pipe_surface *surf_tmpl);
125 /* only for miptrees, don't use for buffers */
127 /* NOTE: for swizzled 3D textures, this just returns the offset of the mipmap level */
128 static inline unsigned
129 nvfx_subresource_offset(struct pipe_resource
* pt
, unsigned face
, unsigned level
, unsigned zslice
)
131 if(pt
->target
== PIPE_BUFFER
)
135 struct nvfx_miptree
*mt
= (struct nvfx_miptree
*)pt
;
137 unsigned offset
= mt
->level_offset
[level
];
138 if (pt
->target
== PIPE_TEXTURE_CUBE
)
139 offset
+= mt
->face_size
* face
;
140 else if (pt
->target
== PIPE_TEXTURE_3D
&& mt
->linear_pitch
)
141 offset
+= zslice
* util_format_get_2d_size(pt
->format
, (mt
->linear_pitch
? mt
->linear_pitch
: util_format_get_stride(pt
->format
, u_minify(pt
->width0
, level
))), u_minify(pt
->height0
, level
));
146 static inline unsigned
147 nvfx_subresource_pitch(struct pipe_resource
* pt
, unsigned level
)
149 if(pt
->target
== PIPE_BUFFER
)
150 return ((struct nvfx_resource
*)pt
)->bo
->size
;
153 struct nvfx_miptree
*mt
= (struct nvfx_miptree
*)pt
;
156 return mt
->linear_pitch
;
158 return util_format_get_stride(pt
->format
, u_minify(pt
->width0
, level
));
/* Render-temporary management for surfaces (see the temporary
 * invariants documented above).
 *
 * Fix: restored the missing "void" return types — the visible
 * declarations had none (implicit int is invalid C99). */
void
nvfx_surface_create_temp(struct pipe_context *pipe, struct pipe_surface *surf);

void
nvfx_surface_flush(struct pipe_context *pipe, struct pipe_surface *surf);
/* Buffer resource with lazy upload to its GPU bo.
 * NOTE(review): the struct tag and some field lines (e.g. a "dirty_end"
 * counterpart to dirty_begin) are not visible in this chunk; the tag
 * "nvfx_buffer" matches the cast helper below — confirm against the
 * full file. */
struct nvfx_buffer
{
	struct nvfx_resource base;

	/* the range of data not yet uploaded to the GPU bo */
	unsigned dirty_begin;

	/* whether all transfers were unsynchronized */
	boolean dirty_unsynchronized;

	/* whether it would have been profitable to upload
	 * the latest updated data to the GPU immediately */
	boolean last_update_static;

	/* how many bytes we need to draw before we deem
	 * the buffer to be static
	 */
	long long bytes_to_draw_until_static;
};
/* Downcast a pipe_resource to the buffer wrapper; only valid for
 * resources that really are nvfx buffers. */
static inline struct nvfx_buffer *nvfx_buffer(struct pipe_resource *pr)
{
	return (struct nvfx_buffer *)pr;
}
196 /* this is an heuristic to determine whether we are better off uploading the
197 * buffer to the GPU, or just continuing pushing it on the FIFO
199 static inline boolean
nvfx_buffer_seems_static(struct nvfx_buffer
* buffer
)
201 return buffer
->last_update_static
202 || buffer
->bytes_to_draw_until_static
< 0;
/* Buffer API.
 *
 * Fix: restored the missing "void" return types on nvfx_buffer_destroy
 * and nvfx_buffer_upload. */

struct pipe_resource *
nvfx_buffer_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *template);

void
nvfx_buffer_destroy(struct pipe_screen *pscreen,
                    struct pipe_resource *presource);

/* NOTE(review): the parameter tail of nvfx_user_buffer_create is missing
 * from this chunk; (ptr, bytes, usage) below is the conventional gallium
 * user-buffer signature — confirm against the implementation. */
struct pipe_resource *
nvfx_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned usage);

/* Presumably uploads the not-yet-uploaded range (see dirty_begin) to
 * the GPU bo — confirm against the implementation. */
void
nvfx_buffer_upload(struct nvfx_buffer *buffer);