radeon/uvd: recalculate dbp buffer size
[mesa.git] / src / gallium / state_trackers / xa / xa_context.c
1 /**********************************************************
2 * Copyright 2009-2011 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 *********************************************************
25 * Authors:
26 * Zack Rusin <zackr-at-vmware-dot-com>
27 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
28 */
29 #include "xa_context.h"
30 #include "xa_priv.h"
31 #include "cso_cache/cso_context.h"
32 #include "util/u_inlines.h"
33 #include "util/u_rect.h"
34 #include "util/u_surface.h"
35 #include "pipe/p_context.h"
36
37 XA_EXPORT void
38 xa_context_flush(struct xa_context *ctx)
39 {
40 if (ctx->last_fence) {
41 struct pipe_screen *screen = ctx->xa->screen;
42 screen->fence_reference(screen, &ctx->last_fence, NULL);
43 }
44 ctx->pipe->flush(ctx->pipe, &ctx->last_fence, 0);
45 }
46
47 XA_EXPORT struct xa_context *
48 xa_context_default(struct xa_tracker *xa)
49 {
50 return xa->default_ctx;
51 }
52
53 XA_EXPORT struct xa_context *
54 xa_context_create(struct xa_tracker *xa)
55 {
56 struct xa_context *ctx = calloc(1, sizeof(*ctx));
57
58 ctx->xa = xa;
59 ctx->pipe = xa->screen->context_create(xa->screen, NULL);
60 ctx->cso = cso_create_context(ctx->pipe);
61 ctx->shaders = xa_shaders_create(ctx);
62 renderer_init_state(ctx);
63
64 return ctx;
65 }
66
67 XA_EXPORT void
68 xa_context_destroy(struct xa_context *r)
69 {
70 struct pipe_resource **vsbuf = &r->vs_const_buffer;
71 struct pipe_resource **fsbuf = &r->fs_const_buffer;
72
73 if (*vsbuf)
74 pipe_resource_reference(vsbuf, NULL);
75
76 if (*fsbuf)
77 pipe_resource_reference(fsbuf, NULL);
78
79 if (r->shaders) {
80 xa_shaders_destroy(r->shaders);
81 r->shaders = NULL;
82 }
83
84 xa_ctx_sampler_views_destroy(r);
85 if (r->srf)
86 pipe_surface_reference(&r->srf, NULL);
87
88 if (r->cso) {
89 cso_destroy_context(r->cso);
90 r->cso = NULL;
91 }
92
93 r->pipe->destroy(r->pipe);
94 }
95
96 XA_EXPORT int
97 xa_surface_dma(struct xa_context *ctx,
98 struct xa_surface *srf,
99 void *data,
100 unsigned int pitch,
101 int to_surface, struct xa_box *boxes, unsigned int num_boxes)
102 {
103 struct pipe_transfer *transfer;
104 void *map;
105 int w, h, i;
106 enum pipe_transfer_usage transfer_direction;
107 struct pipe_context *pipe = ctx->pipe;
108
109 transfer_direction = (to_surface ? PIPE_TRANSFER_WRITE :
110 PIPE_TRANSFER_READ);
111
112 for (i = 0; i < num_boxes; ++i, ++boxes) {
113 w = boxes->x2 - boxes->x1;
114 h = boxes->y2 - boxes->y1;
115
116 map = pipe_transfer_map(pipe, srf->tex, 0, 0,
117 transfer_direction, boxes->x1, boxes->y1,
118 w, h, &transfer);
119 if (!map)
120 return -XA_ERR_NORES;
121
122 if (to_surface) {
123 util_copy_rect(map, srf->tex->format, transfer->stride,
124 0, 0, w, h, data, pitch, boxes->x1, boxes->y1);
125 } else {
126 util_copy_rect(data, srf->tex->format, pitch,
127 boxes->x1, boxes->y1, w, h, map, transfer->stride, 0,
128 0);
129 }
130 pipe->transfer_unmap(pipe, transfer);
131 }
132 return XA_ERR_NONE;
133 }
134
135 XA_EXPORT void *
136 xa_surface_map(struct xa_context *ctx,
137 struct xa_surface *srf, unsigned int usage)
138 {
139 void *map;
140 unsigned int gallium_usage = 0;
141 struct pipe_context *pipe = ctx->pipe;
142
143 /*
144 * A surface may only have a single map.
145 */
146 if (srf->transfer)
147 return NULL;
148
149 if (usage & XA_MAP_READ)
150 gallium_usage |= PIPE_TRANSFER_READ;
151 if (usage & XA_MAP_WRITE)
152 gallium_usage |= PIPE_TRANSFER_WRITE;
153 if (usage & XA_MAP_MAP_DIRECTLY)
154 gallium_usage |= PIPE_TRANSFER_MAP_DIRECTLY;
155 if (usage & XA_MAP_UNSYNCHRONIZED)
156 gallium_usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
157 if (usage & XA_MAP_DONTBLOCK)
158 gallium_usage |= PIPE_TRANSFER_DONTBLOCK;
159 if (usage & XA_MAP_DISCARD_WHOLE_RESOURCE)
160 gallium_usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
161
162 if (!(gallium_usage & (PIPE_TRANSFER_READ_WRITE)))
163 return NULL;
164
165 map = pipe_transfer_map(pipe, srf->tex, 0, 0,
166 gallium_usage, 0, 0,
167 srf->tex->width0, srf->tex->height0,
168 &srf->transfer);
169 if (!map)
170 return NULL;
171
172 srf->mapping_pipe = pipe;
173 return map;
174 }
175
176 XA_EXPORT void
177 xa_surface_unmap(struct xa_surface *srf)
178 {
179 if (srf->transfer) {
180 struct pipe_context *pipe = srf->mapping_pipe;
181
182 pipe->transfer_unmap(pipe, srf->transfer);
183 srf->transfer = NULL;
184 }
185 }
186
187 int
188 xa_ctx_srf_create(struct xa_context *ctx, struct xa_surface *dst)
189 {
190 struct pipe_screen *screen = ctx->pipe->screen;
191 struct pipe_surface srf_templ;
192
193 /*
194 * Cache surfaces unless we change render target
195 */
196 if (ctx->srf) {
197 if (ctx->srf->texture == dst->tex)
198 return XA_ERR_NONE;
199
200 pipe_surface_reference(&ctx->srf, NULL);
201 }
202
203 if (!screen->is_format_supported(screen, dst->tex->format,
204 PIPE_TEXTURE_2D, 0,
205 PIPE_BIND_RENDER_TARGET))
206 return -XA_ERR_INVAL;
207
208 u_surface_default_template(&srf_templ, dst->tex);
209 ctx->srf = ctx->pipe->create_surface(ctx->pipe, dst->tex, &srf_templ);
210 if (!ctx->srf)
211 return -XA_ERR_NORES;
212
213 return XA_ERR_NONE;
214 }
215
/*
 * xa_ctx_srf_destroy - Counterpart of xa_ctx_srf_create().
 *
 * Intentionally a no-op: render-target surfaces are cached on the
 * context and only released when the render target changes (in
 * xa_ctx_srf_create()) or when the context itself is destroyed.
 */
void
xa_ctx_srf_destroy(struct xa_context *ctx)
{
    /*
     * Cache surfaces unless we change render target.
     * Final destruction on context destroy.
     */
}
224
225 XA_EXPORT int
226 xa_copy_prepare(struct xa_context *ctx,
227 struct xa_surface *dst, struct xa_surface *src)
228 {
229 if (src == dst)
230 return -XA_ERR_INVAL;
231
232 if (src->tex->format != dst->tex->format) {
233 int ret = xa_ctx_srf_create(ctx, dst);
234 if (ret != XA_ERR_NONE)
235 return ret;
236 renderer_copy_prepare(ctx, ctx->srf, src->tex,
237 src->fdesc.xa_format,
238 dst->fdesc.xa_format);
239 ctx->simple_copy = 0;
240 } else
241 ctx->simple_copy = 1;
242
243 ctx->src = src;
244 ctx->dst = dst;
245 xa_ctx_srf_destroy(ctx);
246
247 return 0;
248 }
249
250 XA_EXPORT void
251 xa_copy(struct xa_context *ctx,
252 int dx, int dy, int sx, int sy, int width, int height)
253 {
254 struct pipe_box src_box;
255
256 xa_scissor_update(ctx, dx, dy, dx + width, dy + height);
257
258 if (ctx->simple_copy) {
259 u_box_2d(sx, sy, width, height, &src_box);
260 ctx->pipe->resource_copy_region(ctx->pipe,
261 ctx->dst->tex, 0, dx, dy, 0,
262 ctx->src->tex,
263 0, &src_box);
264 } else
265 renderer_copy(ctx, dx, dy, sx, sy, width, height,
266 (float) ctx->src->tex->width0,
267 (float) ctx->src->tex->height0);
268 }
269
270 XA_EXPORT void
271 xa_copy_done(struct xa_context *ctx)
272 {
273 if (!ctx->simple_copy) {
274 renderer_draw_flush(ctx);
275 }
276 }
277
278 static void
279 bind_solid_blend_state(struct xa_context *ctx)
280 {
281 struct pipe_blend_state blend;
282
283 memset(&blend, 0, sizeof(struct pipe_blend_state));
284 blend.rt[0].blend_enable = 0;
285 blend.rt[0].colormask = PIPE_MASK_RGBA;
286
287 blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
288 blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
289 blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
290 blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
291
292 cso_set_blend(ctx->cso, &blend);
293 }
294
295 XA_EXPORT int
296 xa_solid_prepare(struct xa_context *ctx, struct xa_surface *dst,
297 uint32_t fg)
298 {
299 unsigned vs_traits, fs_traits;
300 struct xa_shader shader;
301 int ret;
302
303 ret = xa_ctx_srf_create(ctx, dst);
304 if (ret != XA_ERR_NONE)
305 return ret;
306
307 if (ctx->srf->format == PIPE_FORMAT_L8_UNORM)
308 xa_pixel_to_float4_a8(fg, ctx->solid_color);
309 else
310 xa_pixel_to_float4(fg, ctx->solid_color);
311 ctx->has_solid_color = 1;
312
313 ctx->dst = dst;
314
315 #if 0
316 debug_printf("Color Pixel=(%d, %d, %d, %d), RGBA=(%f, %f, %f, %f)\n",
317 (fg >> 24) & 0xff, (fg >> 16) & 0xff,
318 (fg >> 8) & 0xff, (fg >> 0) & 0xff,
319 exa->solid_color[0], exa->solid_color[1],
320 exa->solid_color[2], exa->solid_color[3]);
321 #endif
322
323 vs_traits = VS_SOLID_FILL;
324 fs_traits = FS_SOLID_FILL;
325
326 renderer_bind_destination(ctx, ctx->srf);
327 bind_solid_blend_state(ctx);
328 cso_set_samplers(ctx->cso, PIPE_SHADER_FRAGMENT, 0, NULL);
329 cso_set_sampler_views(ctx->cso, PIPE_SHADER_FRAGMENT, 0, NULL);
330
331 shader = xa_shaders_get(ctx->shaders, vs_traits, fs_traits);
332 cso_set_vertex_shader_handle(ctx->cso, shader.vs);
333 cso_set_fragment_shader_handle(ctx->cso, shader.fs);
334
335 renderer_begin_solid(ctx);
336
337 xa_ctx_srf_destroy(ctx);
338 return XA_ERR_NONE;
339 }
340
341 XA_EXPORT void
342 xa_solid(struct xa_context *ctx, int x, int y, int width, int height)
343 {
344 xa_scissor_update(ctx, x, y, x + width, y + height);
345 renderer_solid(ctx, x, y, x + width, y + height, ctx->solid_color);
346 }
347
348 XA_EXPORT void
349 xa_solid_done(struct xa_context *ctx)
350 {
351 renderer_draw_flush(ctx);
352 ctx->comp = NULL;
353 ctx->has_solid_color = FALSE;
354 ctx->num_bound_samplers = 0;
355 }
356
357 XA_EXPORT struct xa_fence *
358 xa_fence_get(struct xa_context *ctx)
359 {
360 struct xa_fence *fence = calloc(1, sizeof(*fence));
361 struct pipe_screen *screen = ctx->xa->screen;
362
363 if (!fence)
364 return NULL;
365
366 fence->xa = ctx->xa;
367
368 if (ctx->last_fence == NULL)
369 fence->pipe_fence = NULL;
370 else
371 screen->fence_reference(screen, &fence->pipe_fence, ctx->last_fence);
372
373 return fence;
374 }
375
376 XA_EXPORT int
377 xa_fence_wait(struct xa_fence *fence, uint64_t timeout)
378 {
379 if (!fence)
380 return XA_ERR_NONE;
381
382 if (fence->pipe_fence) {
383 struct pipe_screen *screen = fence->xa->screen;
384 boolean timed_out;
385
386 timed_out = !screen->fence_finish(screen, fence->pipe_fence, timeout);
387 if (timed_out)
388 return -XA_ERR_BUSY;
389
390 screen->fence_reference(screen, &fence->pipe_fence, NULL);
391 }
392 return XA_ERR_NONE;
393 }
394
395 XA_EXPORT void
396 xa_fence_destroy(struct xa_fence *fence)
397 {
398 if (!fence)
399 return;
400
401 if (fence->pipe_fence) {
402 struct pipe_screen *screen = fence->xa->screen;
403
404 screen->fence_reference(screen, &fence->pipe_fence, NULL);
405 }
406
407 free(fence);
408 }
409
410 void
411 xa_ctx_sampler_views_destroy(struct xa_context *ctx)
412 {
413 int i;
414
415 for (i = 0; i < ctx->num_bound_samplers; ++i)
416 pipe_sampler_view_reference(&ctx->bound_sampler_views[i], NULL);
417 ctx->num_bound_samplers = 0;
418 }