swr/rast: add SwrInit() to init backend/memory tables
[mesa.git] / src / gallium / drivers / swr / swr_context.cpp
1 /****************************************************************************
2 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ***************************************************************************/
23
24 #include "swr_context.h"
25 #include "swr_memory.h"
26 #include "swr_screen.h"
27 #include "swr_resource.h"
28 #include "swr_scratch.h"
29 #include "swr_query.h"
30 #include "swr_fence.h"
31
32 #include "util/u_memory.h"
33 #include "util/u_inlines.h"
34 #include "util/u_format.h"
35 #include "util/u_atomic.h"
36 #include "util/u_upload_mgr.h"
37 #include "util/u_transfer.h"
38 #include "util/u_surface.h"
39
40 #include "api.h"
41 #include "backend.h"
42
43 static struct pipe_surface *
44 swr_create_surface(struct pipe_context *pipe,
45 struct pipe_resource *pt,
46 const struct pipe_surface *surf_tmpl)
47 {
48 struct pipe_surface *ps;
49
50 ps = CALLOC_STRUCT(pipe_surface);
51 if (ps) {
52 pipe_reference_init(&ps->reference, 1);
53 pipe_resource_reference(&ps->texture, pt);
54 ps->context = pipe;
55 ps->format = surf_tmpl->format;
56 if (pt->target != PIPE_BUFFER) {
57 assert(surf_tmpl->u.tex.level <= pt->last_level);
58 ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
59 ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
60 ps->u.tex.level = surf_tmpl->u.tex.level;
61 ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
62 ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
63 } else {
64 /* setting width as number of elements should get us correct
65 * renderbuffer width */
66 ps->width = surf_tmpl->u.buf.last_element
67 - surf_tmpl->u.buf.first_element + 1;
68 ps->height = pt->height0;
69 ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
70 ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
71 assert(ps->u.buf.first_element <= ps->u.buf.last_element);
72 assert(ps->u.buf.last_element < ps->width);
73 }
74 }
75 return ps;
76 }
77
78 static void
79 swr_surface_destroy(struct pipe_context *pipe, struct pipe_surface *surf)
80 {
81 assert(surf->texture);
82 struct pipe_resource *resource = surf->texture;
83
84 /* If the resource has been drawn to, store tiles. */
85 swr_store_dirty_resource(pipe, resource, SWR_TILE_RESOLVED);
86
87 pipe_resource_reference(&resource, NULL);
88 FREE(surf);
89 }
90
91
92 static void *
93 swr_transfer_map(struct pipe_context *pipe,
94 struct pipe_resource *resource,
95 unsigned level,
96 unsigned usage,
97 const struct pipe_box *box,
98 struct pipe_transfer **transfer)
99 {
100 struct swr_screen *screen = swr_screen(pipe->screen);
101 struct swr_resource *spr = swr_resource(resource);
102 struct pipe_transfer *pt;
103 enum pipe_format format = resource->format;
104
105 assert(resource);
106 assert(level <= resource->last_level);
107
108 /* If mapping an attached rendertarget, store tiles to surface and set
109 * postStoreTileState to SWR_TILE_INVALID so tiles get reloaded on next use
110 * and nothing needs to be done at unmap. */
111 swr_store_dirty_resource(pipe, resource, SWR_TILE_INVALID);
112
113 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
114 /* If resource is in use, finish fence before mapping.
115 * Unless requested not to block, then if not done return NULL map */
116 if (usage & PIPE_TRANSFER_DONTBLOCK) {
117 if (swr_is_fence_pending(screen->flush_fence))
118 return NULL;
119 } else {
120 if (spr->status) {
121 /* But, if there's no fence pending, submit one.
122 * XXX: Remove once draw timestamps are finished. */
123 if (!swr_is_fence_pending(screen->flush_fence))
124 swr_fence_submit(swr_context(pipe), screen->flush_fence);
125
126 swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
127 swr_resource_unused(resource);
128 }
129 }
130 }
131
132 pt = CALLOC_STRUCT(pipe_transfer);
133 if (!pt)
134 return NULL;
135 pipe_resource_reference(&pt->resource, resource);
136 pt->usage = (pipe_transfer_usage)usage;
137 pt->level = level;
138 pt->box = *box;
139 pt->stride = spr->swr.pitch;
140 pt->layer_stride = spr->swr.qpitch * spr->swr.pitch;
141
142 /* if we're mapping the depth/stencil, copy in stencil for the section
143 * being read in
144 */
145 if (usage & PIPE_TRANSFER_READ && spr->has_depth && spr->has_stencil) {
146 size_t zbase, sbase;
147 for (int z = box->z; z < box->z + box->depth; z++) {
148 zbase = (z * spr->swr.qpitch + box->y) * spr->swr.pitch +
149 spr->mip_offsets[level];
150 sbase = (z * spr->secondary.qpitch + box->y) * spr->secondary.pitch +
151 spr->secondary_mip_offsets[level];
152 for (int y = box->y; y < box->y + box->height; y++) {
153 if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
154 for (int x = box->x; x < box->x + box->width; x++)
155 spr->swr.pBaseAddress[zbase + 4 * x + 3] =
156 spr->secondary.pBaseAddress[sbase + x];
157 } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
158 for (int x = box->x; x < box->x + box->width; x++)
159 spr->swr.pBaseAddress[zbase + 8 * x + 4] =
160 spr->secondary.pBaseAddress[sbase + x];
161 }
162 zbase += spr->swr.pitch;
163 sbase += spr->secondary.pitch;
164 }
165 }
166 }
167
168 unsigned offset = box->z * pt->layer_stride +
169 util_format_get_nblocksy(format, box->y) * pt->stride +
170 util_format_get_stride(format, box->x);
171
172 *transfer = pt;
173
174 return spr->swr.pBaseAddress + offset + spr->mip_offsets[level];
175 }
176
/* Write back the stencil bytes of a mapped combined depth/stencil region.
 *
 * SWR keeps stencil in a separate "secondary" surface; the transfer map
 * exposes the interleaved Z24S8 / Z32F_S8X24 layout.  This extracts the
 * stencil byte from each interleaved texel in flush_box (relative to the
 * transfer's box) and stores it into the secondary surface.  No-op for
 * resources that do not have both depth and stencil.
 */
static void
swr_transfer_flush_region(struct pipe_context *pipe,
                          struct pipe_transfer *transfer,
                          const struct pipe_box *flush_box)
{
   assert(transfer->resource);
   assert(transfer->usage & PIPE_TRANSFER_WRITE);

   struct swr_resource *spr = swr_resource(transfer->resource);
   if (!spr->has_depth || !spr->has_stencil)
      return;

   size_t zbase, sbase;
   /* flush_box coordinates are relative to the mapped box; translate to
    * resource coordinates. */
   struct pipe_box box = *flush_box;
   box.x += transfer->box.x;
   box.y += transfer->box.y;
   box.z += transfer->box.z;
   for (int z = box.z; z < box.z + box.depth; z++) {
      /* zbase/sbase: byte offset of the first row of this slice in the
       * primary (depth) and secondary (stencil) surfaces. */
      zbase = (z * spr->swr.qpitch + box.y) * spr->swr.pitch +
         spr->mip_offsets[transfer->level];
      sbase = (z * spr->secondary.qpitch + box.y) * spr->secondary.pitch +
         spr->secondary_mip_offsets[transfer->level];
      for (int y = box.y; y < box.y + box.height; y++) {
         if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
            /* stencil is byte 3 of each 4-byte Z24S8 texel */
            for (int x = box.x; x < box.x + box.width; x++)
               spr->secondary.pBaseAddress[sbase + x] =
                  spr->swr.pBaseAddress[zbase + 4 * x + 3];
         } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
            /* stencil is byte 4 of each 8-byte Z32F_S8X24 texel */
            for (int x = box.x; x < box.x + box.width; x++)
               spr->secondary.pBaseAddress[sbase + x] =
                  spr->swr.pBaseAddress[zbase + 8 * x + 4];
         }
         zbase += spr->swr.pitch;
         sbase += spr->secondary.pitch;
      }
   }
}
214
215 static void
216 swr_transfer_unmap(struct pipe_context *pipe, struct pipe_transfer *transfer)
217 {
218 assert(transfer->resource);
219
220 struct swr_resource *spr = swr_resource(transfer->resource);
221 /* if we're mapping the depth/stencil, copy in stencil for the section
222 * being written out
223 */
224 if (transfer->usage & PIPE_TRANSFER_WRITE &&
225 !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) &&
226 spr->has_depth && spr->has_stencil) {
227 struct pipe_box box;
228 u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height,
229 transfer->box.depth, &box);
230 swr_transfer_flush_region(pipe, transfer, &box);
231 }
232
233 pipe_resource_reference(&transfer->resource, NULL);
234 FREE(transfer);
235 }
236
237
238 static void
239 swr_resource_copy(struct pipe_context *pipe,
240 struct pipe_resource *dst,
241 unsigned dst_level,
242 unsigned dstx,
243 unsigned dsty,
244 unsigned dstz,
245 struct pipe_resource *src,
246 unsigned src_level,
247 const struct pipe_box *src_box)
248 {
249 struct swr_screen *screen = swr_screen(pipe->screen);
250
251 /* If either the src or dst is a renderTarget, store tiles before copy */
252 swr_store_dirty_resource(pipe, src, SWR_TILE_RESOLVED);
253 swr_store_dirty_resource(pipe, dst, SWR_TILE_RESOLVED);
254
255 swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
256 swr_resource_unused(src);
257 swr_resource_unused(dst);
258
259 if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER)
260 || (dst->target != PIPE_BUFFER && src->target != PIPE_BUFFER)) {
261 util_resource_copy_region(
262 pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box);
263 return;
264 }
265
266 debug_printf("unhandled swr_resource_copy\n");
267 }
268
269
270 /* XXX: This resolve is incomplete and suboptimal. It will be removed once the
271 * pipelined resolve blit works. */
272 void
273 swr_do_msaa_resolve(struct pipe_resource *src_resource,
274 struct pipe_resource *dst_resource)
275 {
276 /* This is a pretty dumb inline resolve. It only supports 8-bit formats
277 * (ex RGBA8/BGRA8) - which are most common display formats anyway.
278 */
279
280 /* quick check for 8-bit and number of components */
281 uint8_t bits_per_component =
282 util_format_get_component_bits(src_resource->format,
283 UTIL_FORMAT_COLORSPACE_RGB, 0);
284
285 /* Unsupported resolve format */
286 assert(src_resource->format == dst_resource->format);
287 assert(bits_per_component == 8);
288 if ((src_resource->format != dst_resource->format) ||
289 (bits_per_component != 8)) {
290 return;
291 }
292
293 uint8_t src_num_comps = util_format_get_nr_components(src_resource->format);
294
295 SWR_SURFACE_STATE *src_surface = &swr_resource(src_resource)->swr;
296 SWR_SURFACE_STATE *dst_surface = &swr_resource(dst_resource)->swr;
297
298 uint32_t *src, *dst, offset;
299 uint32_t num_samples = src_surface->numSamples;
300 float recip_num_samples = 1.0f / num_samples;
301 for (uint32_t y = 0; y < src_surface->height; y++) {
302 for (uint32_t x = 0; x < src_surface->width; x++) {
303 float r = 0.0f;
304 float g = 0.0f;
305 float b = 0.0f;
306 float a = 0.0f;
307 for (uint32_t sampleNum = 0; sampleNum < num_samples; sampleNum++) {
308 offset = ComputeSurfaceOffset<false>(x, y, 0, 0, sampleNum, 0, src_surface);
309 src = (uint32_t *) src_surface->pBaseAddress + offset/src_num_comps;
310 const uint32_t sample = *src;
311 r += (float)((sample >> 24) & 0xff) / 255.0f * recip_num_samples;
312 g += (float)((sample >> 16) & 0xff) / 255.0f * recip_num_samples;
313 b += (float)((sample >> 8) & 0xff) / 255.0f * recip_num_samples;
314 a += (float)((sample ) & 0xff) / 255.0f * recip_num_samples;
315 }
316 uint32_t result = 0;
317 result = ((uint8_t)(r * 255.0f) & 0xff) << 24;
318 result |= ((uint8_t)(g * 255.0f) & 0xff) << 16;
319 result |= ((uint8_t)(b * 255.0f) & 0xff) << 8;
320 result |= ((uint8_t)(a * 255.0f) & 0xff);
321 offset = ComputeSurfaceOffset<false>(x, y, 0, 0, 0, 0, src_surface);
322 dst = (uint32_t *) dst_surface->pBaseAddress + offset/src_num_comps;
323 *dst = result;
324 }
325 }
326 }
327
328
/* Blit between two resources via the shared util_blitter.
 *
 * Handles an inline (non-pipelined) color MSAA resolve first when the
 * source is multisampled and the destination is not, skips stencil blits
 * (unsupported), and temporarily disables stats collection around the
 * blitter draw so internal draws don't pollute query results.
 */
static void
swr_blit(struct pipe_context *pipe, const struct pipe_blit_info *blit_info)
{
   struct swr_context *ctx = swr_context(pipe);
   /* Make a copy of the const blit_info, so we can modify it */
   struct pipe_blit_info info = *blit_info;

   /* Honor conditional rendering: skip the blit if the condition fails. */
   if (info.render_condition_enable && !swr_check_render_cond(pipe))
      return;

   if (info.src.resource->nr_samples > 1 && info.dst.resource->nr_samples <= 1
         && !util_format_is_depth_or_stencil(info.src.resource->format)
         && !util_format_is_pure_integer(info.src.resource->format)) {
      debug_printf("swr_blit: color resolve : %d -> %d\n",
            info.src.resource->nr_samples, info.dst.resource->nr_samples);

      /* Because the resolve is being done inline (not pipelined),
       * resources need to be stored out of hottiles and the pipeline empty.
       *
       * Resources are marked unused following fence finish because all
       * pipeline operations are complete. Validation of the blit will mark
       * them are read/write again.
       */
      swr_store_dirty_resource(pipe, info.src.resource, SWR_TILE_RESOLVED);
      swr_store_dirty_resource(pipe, info.dst.resource, SWR_TILE_RESOLVED);
      swr_fence_finish(pipe->screen, NULL, swr_screen(pipe->screen)->flush_fence, 0);
      swr_resource_unused(info.src.resource);
      swr_resource_unused(info.dst.resource);

      struct pipe_resource *src_resource = info.src.resource;
      struct pipe_resource *resolve_target =
         swr_resource(src_resource)->resolve_target;

      /* Inline resolve samples into resolve target resource, then continue
       * the blit. */
      swr_do_msaa_resolve(src_resource, resolve_target);

      /* The resolve target becomes the new source for the blit. */
      info.src.resource = resolve_target;
   }

   /* Fast path: a plain region copy satisfies some blits entirely. */
   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("swr: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
      debug_printf("swr: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   /* Suppress FE/BE stats so the blitter's internal draw doesn't count
    * toward active queries. */
   if (ctx->active_queries) {
      SwrEnableStatsFE(ctx->swrContext, FALSE);
      SwrEnableStatsBE(ctx->swrContext, FALSE);
   }

   /* Save every piece of state the blitter will clobber; util_blitter
    * restores it after the blit. */
   util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vertex_buffer);
   util_blitter_save_vertex_elements(ctx->blitter, (void *)ctx->velems);
   util_blitter_save_vertex_shader(ctx->blitter, (void *)ctx->vs);
   util_blitter_save_geometry_shader(ctx->blitter, (void*)ctx->gs);
   util_blitter_save_so_targets(
      ctx->blitter,
      ctx->num_so_targets,
      (struct pipe_stream_output_target **)ctx->so_targets);
   util_blitter_save_rasterizer(ctx->blitter, (void *)ctx->rasterizer);
   util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
   util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
   util_blitter_save_fragment_shader(ctx->blitter, ctx->fs);
   util_blitter_save_blend(ctx->blitter, (void *)ctx->blend);
   util_blitter_save_depth_stencil_alpha(ctx->blitter,
                                         (void *)ctx->depth_stencil);
   util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
   util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
   util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
   util_blitter_save_fragment_sampler_states(
      ctx->blitter,
      ctx->num_samplers[PIPE_SHADER_FRAGMENT],
      (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(
      ctx->blitter,
      ctx->num_sampler_views[PIPE_SHADER_FRAGMENT],
      ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_render_condition(ctx->blitter,
                                      ctx->render_cond_query,
                                      ctx->render_cond_cond,
                                      ctx->render_cond_mode);

   util_blitter_blit(ctx->blitter, &info);

   /* Re-enable stats collection if queries are active. */
   if (ctx->active_queries) {
      SwrEnableStatsFE(ctx->swrContext, TRUE);
      SwrEnableStatsBE(ctx->swrContext, TRUE);
   }
}
429
430
431 static void
432 swr_destroy(struct pipe_context *pipe)
433 {
434 struct swr_context *ctx = swr_context(pipe);
435 struct swr_screen *screen = swr_screen(pipe->screen);
436
437 if (ctx->blitter)
438 util_blitter_destroy(ctx->blitter);
439
440 for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
441 pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
442 }
443
444 pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
445
446 for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
447 pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL);
448 }
449
450 for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
451 pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_VERTEX][i], NULL);
452 }
453
454 if (ctx->pipe.stream_uploader)
455 u_upload_destroy(ctx->pipe.stream_uploader);
456
457 /* Idle core after destroying buffer resources, but before deleting
458 * context. Destroying resources has potentially called StoreTiles.*/
459 SwrWaitForIdle(ctx->swrContext);
460
461 if (ctx->swrContext)
462 SwrDestroyContext(ctx->swrContext);
463
464 delete ctx->blendJIT;
465
466 swr_destroy_scratch_buffers(ctx);
467
468 /* Only update screen->pipe if current context is being destroyed */
469 assert(screen);
470 if (screen->pipe == pipe)
471 screen->pipe = NULL;
472
473 AlignedFree(ctx);
474 }
475
476
477 static void
478 swr_render_condition(struct pipe_context *pipe,
479 struct pipe_query *query,
480 boolean condition,
481 enum pipe_render_cond_flag mode)
482 {
483 struct swr_context *ctx = swr_context(pipe);
484
485 ctx->render_cond_query = query;
486 ctx->render_cond_mode = mode;
487 ctx->render_cond_cond = condition;
488 }
489
490 static void
491 swr_UpdateStats(HANDLE hPrivateContext, const SWR_STATS *pStats)
492 {
493 swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;
494
495 if (!pDC)
496 return;
497
498 struct swr_query_result *pqr = (struct swr_query_result *)pDC->pStats;
499
500 SWR_STATS *pSwrStats = &pqr->core;
501
502 pSwrStats->DepthPassCount += pStats->DepthPassCount;
503 pSwrStats->PsInvocations += pStats->PsInvocations;
504 pSwrStats->CsInvocations += pStats->CsInvocations;
505 }
506
507 static void
508 swr_UpdateStatsFE(HANDLE hPrivateContext, const SWR_STATS_FE *pStats)
509 {
510 swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;
511
512 if (!pDC)
513 return;
514
515 struct swr_query_result *pqr = (struct swr_query_result *)pDC->pStats;
516
517 SWR_STATS_FE *pSwrStats = &pqr->coreFE;
518 p_atomic_add(&pSwrStats->IaVertices, pStats->IaVertices);
519 p_atomic_add(&pSwrStats->IaPrimitives, pStats->IaPrimitives);
520 p_atomic_add(&pSwrStats->VsInvocations, pStats->VsInvocations);
521 p_atomic_add(&pSwrStats->HsInvocations, pStats->HsInvocations);
522 p_atomic_add(&pSwrStats->DsInvocations, pStats->DsInvocations);
523 p_atomic_add(&pSwrStats->GsInvocations, pStats->GsInvocations);
524 p_atomic_add(&pSwrStats->CInvocations, pStats->CInvocations);
525 p_atomic_add(&pSwrStats->CPrimitives, pStats->CPrimitives);
526 p_atomic_add(&pSwrStats->GsPrimitives, pStats->GsPrimitives);
527
528 for (unsigned i = 0; i < 4; i++) {
529 p_atomic_add(&pSwrStats->SoPrimStorageNeeded[i],
530 pStats->SoPrimStorageNeeded[i]);
531 p_atomic_add(&pSwrStats->SoNumPrimsWritten[i],
532 pStats->SoNumPrimsWritten[i]);
533 }
534 }
535
536 struct pipe_context *
537 swr_create_context(struct pipe_screen *p_screen, void *priv, unsigned flags)
538 {
539 struct swr_context *ctx = (struct swr_context *)
540 AlignedMalloc(sizeof(struct swr_context), KNOB_SIMD_BYTES);
541 memset(ctx, 0, sizeof(struct swr_context));
542
543 ctx->blendJIT =
544 new std::unordered_map<BLEND_COMPILE_STATE, PFN_BLEND_JIT_FUNC>;
545
546 SWR_CREATECONTEXT_INFO createInfo;
547 memset(&createInfo, 0, sizeof(createInfo));
548 createInfo.privateStateSize = sizeof(swr_draw_context);
549 createInfo.pfnLoadTile = swr_LoadHotTile;
550 createInfo.pfnStoreTile = swr_StoreHotTile;
551 createInfo.pfnClearTile = swr_StoreHotTileClear;
552 createInfo.pfnUpdateStats = swr_UpdateStats;
553 createInfo.pfnUpdateStatsFE = swr_UpdateStatsFE;
554 ctx->swrContext = SwrCreateContext(&createInfo);
555
556 SwrInit();
557
558 if (ctx->swrContext == NULL)
559 goto fail;
560
561 ctx->pipe.screen = p_screen;
562 ctx->pipe.destroy = swr_destroy;
563 ctx->pipe.priv = priv;
564 ctx->pipe.create_surface = swr_create_surface;
565 ctx->pipe.surface_destroy = swr_surface_destroy;
566 ctx->pipe.transfer_map = swr_transfer_map;
567 ctx->pipe.transfer_unmap = swr_transfer_unmap;
568 ctx->pipe.transfer_flush_region = swr_transfer_flush_region;
569
570 ctx->pipe.buffer_subdata = u_default_buffer_subdata;
571 ctx->pipe.texture_subdata = u_default_texture_subdata;
572
573 ctx->pipe.clear_texture = util_clear_texture;
574 ctx->pipe.resource_copy_region = swr_resource_copy;
575 ctx->pipe.render_condition = swr_render_condition;
576
577 swr_state_init(&ctx->pipe);
578 swr_clear_init(&ctx->pipe);
579 swr_draw_init(&ctx->pipe);
580 swr_query_init(&ctx->pipe);
581
582 ctx->pipe.stream_uploader = u_upload_create_default(&ctx->pipe);
583 if (!ctx->pipe.stream_uploader)
584 goto fail;
585 ctx->pipe.const_uploader = ctx->pipe.stream_uploader;
586
587 ctx->pipe.blit = swr_blit;
588 ctx->blitter = util_blitter_create(&ctx->pipe);
589 if (!ctx->blitter)
590 goto fail;
591
592 swr_init_scratch_buffers(ctx);
593
594 return &ctx->pipe;
595
596 fail:
597 /* Should really validate the init steps and fail gracefully */
598 swr_destroy(&ctx->pipe);
599 return NULL;
600 }