Merge branch '7.8'
[mesa.git] / src/gallium/drivers/nvfx/nvfx_miptree.c
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_math.h"

#include "nvfx_context.h"
#include "nv04_surface_2d.h"

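/* Compute the pitch and per-face/per-slice image offsets for every mipmap
 * level of a miptree and record the total buffer size in mt->total_size.
 * Linear textures with sampler/render/depth-stencil/display/scanout usage
 * get a pitch aligned to 64 bytes, derived from the level-0 width for all
 * levels.
 */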
static void
nvfx_miptree_layout(struct nvfx_miptree *mt)
{
	struct pipe_texture *pt = &mt->base;
	uint width = pt->width0;
	uint offset = 0;
	int nr_faces, l, f;
	uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
	                                   PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
	                                   PIPE_TEXTURE_USAGE_RENDER_TARGET |
	                                   PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
	                                   PIPE_TEXTURE_USAGE_SCANOUT);

	if (pt->target == PIPE_TEXTURE_CUBE) {
		nr_faces = 6;
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		nr_faces = pt->depth0;
	} else {
		nr_faces = 1;
	}

	for (l = 0; l <= pt->last_level; l++) {
		if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
			mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
		else
			mt->level[l].pitch = util_format_get_stride(pt->format, width);

		mt->level[l].image_offset =
			CALLOC(nr_faces, sizeof(unsigned));

		width = u_minify(width, 1);
	}

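	/* Second pass: assign a byte offset to each face (or 3D slice) of each
	 * level. Swizzled levels are padded to a 64-byte boundary as long as the
	 * next level is still more than one texel in both dimensions; linear
	 * levels and the tail of the mip chain are packed without extra padding.
	 */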
	for (f = 0; f < nr_faces; f++) {
		for (l = 0; l < pt->last_level; l++) {
			mt->level[l].image_offset[f] = offset;

			if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
			    u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
				offset += align(mt->level[l].pitch * u_minify(pt->height0, l), 64);
			else
				offset += mt->level[l].pitch * u_minify(pt->height0, l);
		}

		mt->level[l].image_offset[f] = offset;
		offset += mt->level[l].pitch * u_minify(pt->height0, l);
	}

	mt->total_size = offset;
}

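/* Create a texture. Non-power-of-two, scanout/display/depth-stencil and
 * dynamic textures are laid out linearly; otherwise the layout is chosen per
 * format, and the NOUVEAU_NO_SWIZZLE debug option forces a linear layout for
 * formats that would otherwise be swizzled. The backing buffer is allocated
 * with 256-byte alignment.
 */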
static struct pipe_texture *
nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
{
	struct nvfx_miptree *mt;
	unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
	                     NOUVEAU_BUFFER_USAGE_TEXTURE;
	static int no_swizzle = -1;
	if(no_swizzle < 0)
		no_swizzle = debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE);

	mt = MALLOC(sizeof(struct nvfx_miptree));
	if (!mt)
		return NULL;
	mt->base = *pt;
	pipe_reference_init(&mt->base.reference, 1);
	mt->base.screen = pscreen;

	/* Swizzled textures must be POT */
	if (pt->width0 & (pt->width0 - 1) ||
	    pt->height0 & (pt->height0 - 1))
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else
	if (pt->tex_usage & (PIPE_TEXTURE_USAGE_SCANOUT |
	                     PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
	                     PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else
	if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
	else {
		switch (pt->format) {
		case PIPE_FORMAT_B5G6R5_UNORM:
		case PIPE_FORMAT_L8A8_UNORM:
		case PIPE_FORMAT_A8_UNORM:
		case PIPE_FORMAT_L8_UNORM:
		case PIPE_FORMAT_I8_UNORM:
			/* TODO: we can actually swizzle these formats on nv40, we
			   are just preserving the pre-unification behavior.
			   The whole 2D code is going to be rewritten anyway. */
			if(nvfx_screen(pscreen)->is_nv4x) {
				mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
				break;
			}
			/* TODO: Figure out which formats can be swizzled */
		case PIPE_FORMAT_B8G8R8A8_UNORM:
		case PIPE_FORMAT_B8G8R8X8_UNORM:
		case PIPE_FORMAT_R16_SNORM:
		{
			if (no_swizzle)
				mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
			break;
		}
		default:
			mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
		}
	}

	if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
		buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

	/* apparently we can't render to swizzled surfaces smaller than 64 bytes, so make them linear.
	 * If the user did not ask for a render target, they can still render to it, but it will cost them an extra copy.
	 * This also happens for small mipmaps of large textures. */
	if (pt->tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET && util_format_get_stride(pt->format, pt->width0) < 64)
		mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

	nvfx_miptree_layout(mt);

	mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage, mt->total_size);
	if (!mt->buffer) {
		FREE(mt);
		return NULL;
	}
	mt->bo = nouveau_bo(mt->buffer);
	return &mt->base;
}

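/* Create a texture that wraps an existing pipe_buffer instead of allocating
 * new storage; only simple 2D, non-mipmapped textures are supported, and the
 * buffer contents are assumed to be linear.
 */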
static struct pipe_texture *
nvfx_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
		     const unsigned *stride, struct pipe_buffer *pb)
{
	struct nvfx_miptree *mt;

	/* Only supports 2D, non-mipmapped textures for the moment */
	if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
	    pt->depth0 != 1)
		return NULL;

	mt = CALLOC_STRUCT(nvfx_miptree);
	if (!mt)
		return NULL;

	mt->base = *pt;
	pipe_reference_init(&mt->base.reference, 1);
	mt->base.screen = pscreen;
	mt->level[0].pitch = stride[0];
	mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));

	/* Assume whoever created this buffer expects it to be linear for now */
	mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;

	pipe_buffer_reference(&mt->buffer, pb);
	mt->bo = nouveau_bo(mt->buffer);
	return &mt->base;
}

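/* Destroy a texture: release the backing buffer and the per-level image
 * offset arrays.
 */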
static void
nvfx_miptree_destroy(struct pipe_texture *pt)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	int l;

	pipe_buffer_reference(&mt->buffer, NULL);
	for (l = 0; l <= pt->last_level; l++) {
		if (mt->level[l].image_offset)
			FREE(mt->level[l].image_offset);
	}

	FREE(mt);
}

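/* Create a pipe_surface for a given face, level and zslice. Swizzled
 * surfaces that are too narrow to be rendered to directly are wrapped in a
 * linear temporary by nv04_surface_wrap_for_render.
 */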
static struct pipe_surface *
nvfx_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
			 unsigned face, unsigned level, unsigned zslice,
			 unsigned flags)
{
	struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
	struct nv04_surface *ns;

	ns = CALLOC_STRUCT(nv04_surface);
	if (!ns)
		return NULL;
	pipe_texture_reference(&ns->base.texture, pt);
	ns->base.format = pt->format;
	ns->base.width = u_minify(pt->width0, level);
	ns->base.height = u_minify(pt->height0, level);
	ns->base.usage = flags;
	pipe_reference_init(&ns->base.reference, 1);
	ns->base.face = face;
	ns->base.level = level;
	ns->base.zslice = zslice;
	ns->pitch = mt->level[level].pitch;

	if (pt->target == PIPE_TEXTURE_CUBE) {
		ns->base.offset = mt->level[level].image_offset[face];
	} else
	if (pt->target == PIPE_TEXTURE_3D) {
		ns->base.offset = mt->level[level].image_offset[zslice];
	} else {
		ns->base.offset = mt->level[level].image_offset[0];
	}

	/* create a linear temporary that we can render into if necessary.
	 * Note that ns->pitch is always a multiple of 64 for linear surfaces and swizzled surfaces are POT, so
	 * ns->pitch & 63 is equivalent to (ns->pitch < 64 && swizzled) */
	if((ns->pitch & 63) && (ns->base.usage & (PIPE_BUFFER_USAGE_GPU_WRITE | NOUVEAU_BUFFER_USAGE_NO_RENDER)) == PIPE_BUFFER_USAGE_GPU_WRITE)
		return &nv04_surface_wrap_for_render(pscreen, ((struct nvfx_screen*)pscreen)->eng2d, ns)->base;

	return &ns->base;
}

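/* Destroy a surface. If it has a backing surface (set up by
 * nv04_surface_wrap_for_render), its contents are copied over to that
 * backing surface when GPU writes were requested, and the backing surface
 * is destroyed along with it.
 */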
static void
nvfx_miptree_surface_del(struct pipe_surface *ps)
{
	struct nv04_surface* ns = (struct nv04_surface*)ps;
	if(ns->backing)
	{
		struct nvfx_screen* screen = (struct nvfx_screen*)ps->texture->screen;
		if(ns->backing->base.usage & PIPE_BUFFER_USAGE_GPU_WRITE)
			screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0, ps, 0, 0, ns->base.width, ns->base.height);
		nvfx_miptree_surface_del(&ns->backing->base);
	}

	pipe_texture_reference(&ps->texture, NULL);
	FREE(ps);
}

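/* Register the miptree functions with the pipe_screen. */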
void
nvfx_screen_init_miptree_functions(struct pipe_screen *pscreen)
{
	pscreen->texture_create = nvfx_miptree_create;
	pscreen->texture_destroy = nvfx_miptree_destroy;
	pscreen->get_tex_surface = nvfx_miptree_surface_new;
	pscreen->tex_surface_destroy = nvfx_miptree_surface_del;

	nouveau_screen(pscreen)->texture_blanket = nvfx_miptree_blanket;
}