nvfx: move nv04_surface_2d.c into nvfx directory
[mesa.git] / src / gallium / drivers / nvfx / nvfx_miptree.c
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_math.h"

#include "nvfx_context.h"
#include "nv04_surface_2d.h"

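/* Compute the per-level pitch and per-face image offsets for a miptree,
 * accumulating the total buffer size in mt->total_size. */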
static void
nvfx_miptree_layout(struct nvfx_miptree *mt)
{
	struct pipe_texture *pt = &mt->base;
	uint width = pt->width0;
	uint offset = 0;
	int nr_faces, l, f;
	uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
	                                   PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
	                                   PIPE_TEXTURE_USAGE_RENDER_TARGET |
	                                   PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
	                                   PIPE_TEXTURE_USAGE_SCANOUT);

	if (pt->target == PIPE_TEXTURE_CUBE) {
		nr_faces = 6;
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		nr_faces = pt->depth0;
	} else {
		nr_faces = 1;
	}

	for (l = 0; l <= pt->last_level; l++) {
		if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
			mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
		else
			mt->level[l].pitch = util_format_get_stride(pt->format, width);

		mt->level[l].image_offset = CALLOC(nr_faces, sizeof(unsigned));

		width = u_minify(width, 1);
	}

	for (f = 0; f < nr_faces; f++) {
		for (l = 0; l < pt->last_level; l++) {
			mt->level[l].image_offset[f] = offset;

			if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
			    u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
				offset += align(mt->level[l].pitch * u_minify(pt->height0, l), 64);
			else
				offset += mt->level[l].pitch * u_minify(pt->height0, l);
		}

		mt->level[l].image_offset[f] = offset;
		offset += mt->level[l].pitch * u_minify(pt->height0, l);
	}

	mt->total_size = offset;
}

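/* pipe_screen::texture_create hook: choose a swizzled or linear layout based
 * on usage, dimensions and format, lay out the mip levels and allocate the
 * backing buffer. */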
static struct pipe_texture *
nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
{
	struct nvfx_miptree *mt;
	unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
	                     NOUVEAU_BUFFER_USAGE_TEXTURE;

	mt = MALLOC(sizeof(struct nvfx_miptree));
	if (!mt)
		return NULL;
	mt->base = *pt;
	pipe_reference_init(&mt->base.reference, 1);
	mt->base.screen = pscreen;

	/* Swizzled textures must be POT */
	if (pt->width0 & (pt->width0 - 1) ||
	    pt->height0 & (pt->height0 - 1))
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else
	if (pt->tex_usage & (PIPE_TEXTURE_USAGE_SCANOUT |
	                     PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
	                     PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else
	if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else {
		switch (pt->format) {
		case PIPE_FORMAT_B5G6R5_UNORM:
		case PIPE_FORMAT_L8A8_UNORM:
		case PIPE_FORMAT_A8_UNORM:
		case PIPE_FORMAT_L8_UNORM:
		case PIPE_FORMAT_I8_UNORM:
			/* TODO: we can actually swizzle these formats on nv40, we
			   are just preserving the pre-unification behavior.
			   The whole 2D code is going to be rewritten anyway. */
			if (nvfx_screen(pscreen)->is_nv4x) {
				mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
				break;
			}
			/* TODO: Figure out which formats can be swizzled */
		case PIPE_FORMAT_B8G8R8A8_UNORM:
		case PIPE_FORMAT_B8G8R8X8_UNORM:
		case PIPE_FORMAT_R16_SNORM:
		{
			if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
				mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
			break;
		}
		default:
			mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
		}
	}

	if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
		buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

	/* apparently we can't render to swizzled surfaces smaller than 64 bytes,
	 * so make them linear.
	 * If the user did not ask for a render target, they can still render to
	 * it, but it will cost them an extra copy.
	 * This also happens for small mipmaps of large textures. */
	if (pt->tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET &&
	    util_format_get_stride(pt->format, pt->width0) < 64)
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

	nvfx_miptree_layout(mt);

	mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage, mt->total_size);
	if (!mt->buffer) {
		FREE(mt);
		return NULL;
	}
	mt->bo = nouveau_bo(mt->buffer);
	return &mt->base;
}

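/* texture_blanket hook: wrap an externally allocated pipe_buffer in a 2D,
 * non-mipmapped, linear texture without allocating new storage. */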
static struct pipe_texture *
nvfx_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
		     const unsigned *stride, struct pipe_buffer *pb)
{
	struct nvfx_miptree *mt;

	/* Only supports 2D, non-mipmapped textures for the moment */
	if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
	    pt->depth0 != 1)
		return NULL;

	mt = CALLOC_STRUCT(nvfx_miptree);
	if (!mt)
		return NULL;

	mt->base = *pt;
	pipe_reference_init(&mt->base.reference, 1);
	mt->base.screen = pscreen;
	mt->level[0].pitch = stride[0];
	mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));

	/* Assume whoever created this buffer expects it to be linear for now */
	mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

	pipe_buffer_reference(&mt->buffer, pb);
	mt->bo = nouveau_bo(mt->buffer);
	return &mt->base;
}

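/* pipe_screen::texture_destroy hook: drop the buffer reference and free the
 * per-level image offset arrays. */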
static void
nvfx_miptree_destroy(struct pipe_texture *pt)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	int l;

	pipe_buffer_reference(&mt->buffer, NULL);
	for (l = 0; l <= pt->last_level; l++) {
		if (mt->level[l].image_offset)
			FREE(mt->level[l].image_offset);
	}

	FREE(mt);
}

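/* pipe_screen::get_tex_surface hook: build a surface view of one
 * face/level/zslice; when a narrow swizzled surface is going to be rendered
 * to, return a linear temporary wrapping it instead. */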
static struct pipe_surface *
nvfx_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
			 unsigned face, unsigned level, unsigned zslice,
			 unsigned flags)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	struct nv04_surface *ns;

	ns = CALLOC_STRUCT(nv04_surface);
	if (!ns)
		return NULL;
	pipe_texture_reference(&ns->base.texture, pt);
	ns->base.format = pt->format;
	ns->base.width = u_minify(pt->width0, level);
	ns->base.height = u_minify(pt->height0, level);
	ns->base.usage = flags;
	pipe_reference_init(&ns->base.reference, 1);
	ns->base.face = face;
	ns->base.level = level;
	ns->base.zslice = zslice;
	ns->pitch = mt->level[level].pitch;

	if (pt->target == PIPE_TEXTURE_CUBE) {
		ns->base.offset = mt->level[level].image_offset[face];
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		ns->base.offset = mt->level[level].image_offset[zslice];
	} else {
		ns->base.offset = mt->level[level].image_offset[0];
	}

	/* create a linear temporary that we can render into if necessary.
	 * Note that ns->pitch is always a multiple of 64 for linear surfaces
	 * and swizzled surfaces are POT, so ns->pitch & 63 is equivalent to
	 * (ns->pitch < 64 && swizzled) */
	if ((ns->pitch & 63) &&
	    (ns->base.usage & (PIPE_BUFFER_USAGE_GPU_WRITE | NOUVEAU_BUFFER_USAGE_NO_RENDER)) == PIPE_BUFFER_USAGE_GPU_WRITE)
		return &nv04_surface_wrap_for_render(pscreen, ((struct nvfx_screen*)pscreen)->eng2d, ns)->base;

	return &ns->base;
}

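/* pipe_screen::tex_surface_destroy hook: if this surface carries a backing
 * surface (set up by nv04_surface_wrap_for_render), copy back the rendered
 * contents when the backing was requested for GPU writes, and release the
 * backing surface as well. */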
static void
nvfx_miptree_surface_del(struct pipe_surface *ps)
{
	struct nv04_surface* ns = (struct nv04_surface*)ps;
	if (ns->backing)
	{
		struct nvfx_screen* screen = (struct nvfx_screen*)ps->texture->screen;
		if (ns->backing->base.usage & PIPE_BUFFER_USAGE_GPU_WRITE)
			screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0, ps, 0, 0, ns->base.width, ns->base.height);
		nvfx_miptree_surface_del(&ns->backing->base);
	}

	pipe_texture_reference(&ps->texture, NULL);
	FREE(ps);
}

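/* Install the miptree hooks on the pipe_screen and the nouveau_screen. */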
void
nvfx_screen_init_miptree_functions(struct pipe_screen *pscreen)
{
	pscreen->texture_create = nvfx_miptree_create;
	pscreen->texture_destroy = nvfx_miptree_destroy;
	pscreen->get_tex_surface = nvfx_miptree_surface_new;
	pscreen->tex_surface_destroy = nvfx_miptree_surface_del;

	nouveau_screen(pscreen)->texture_blanket = nvfx_miptree_blanket;
}