gallium/swr: Fix compilation warnings
src/gallium/drivers/swr/swr_context.cpp
/****************************************************************************
 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 ***************************************************************************/

#include "swr_context.h"
#include "swr_memory.h"
#include "swr_screen.h"
#include "swr_resource.h"
#include "swr_scratch.h"
#include "swr_query.h"
#include "swr_fence.h"

#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"

#include "api.h"
#include "backend.h"
#include "knobs.h"

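/* Create a pipe_surface view of a resource: a single mip level / layer range
 * for textures, or an element range for buffers. */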
static struct pipe_surface *
swr_create_surface(struct pipe_context *pipe,
                   struct pipe_resource *pt,
                   const struct pipe_surface *surf_tmpl)
{
   struct pipe_surface *ps;

   ps = CALLOC_STRUCT(pipe_surface);
   if (ps) {
      pipe_reference_init(&ps->reference, 1);
      pipe_resource_reference(&ps->texture, pt);
      ps->context = pipe;
      ps->format = surf_tmpl->format;
      if (pt->target != PIPE_BUFFER) {
         assert(surf_tmpl->u.tex.level <= pt->last_level);
         ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
         ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
         ps->u.tex.level = surf_tmpl->u.tex.level;
         ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
         ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
      } else {
         /* setting width as number of elements should get us correct
          * renderbuffer width */
         ps->width = surf_tmpl->u.buf.last_element
                     - surf_tmpl->u.buf.first_element + 1;
         ps->height = pt->height0;
         ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
         ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
         assert(ps->u.buf.first_element <= ps->u.buf.last_element);
         assert(ps->u.buf.last_element < ps->width);
      }
   }
   return ps;
}

static void
swr_surface_destroy(struct pipe_context *pipe, struct pipe_surface *surf)
{
   assert(surf->texture);
   struct pipe_resource *resource = surf->texture;

   /* If the resource has been drawn to, store tiles. */
   swr_store_dirty_resource(pipe, resource, SWR_TILE_RESOLVED);

   pipe_resource_reference(&resource, NULL);
   FREE(surf);
}


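/* Map a resource for CPU access. Dirty render-target tiles are stored out
 * first and marked invalid so they are reloaded on next use, which keeps
 * unmap trivial for the non-depth/stencil case. */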
static void *
swr_transfer_map(struct pipe_context *pipe,
                 struct pipe_resource *resource,
                 unsigned level,
                 unsigned usage,
                 const struct pipe_box *box,
                 struct pipe_transfer **transfer)
{
   struct swr_screen *screen = swr_screen(pipe->screen);
   struct swr_resource *spr = swr_resource(resource);
   struct pipe_transfer *pt;
   enum pipe_format format;

   assert(resource);
   assert(level <= resource->last_level);

   format = resource->format;

   /* If mapping an attached rendertarget, store tiles to the surface and set
    * postStoreTileState to SWR_TILE_INVALID so tiles get reloaded on next use
    * and nothing needs to be done at unmap. */
   swr_store_dirty_resource(pipe, resource, SWR_TILE_INVALID);

   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* If the resource is in use, wait on the flush fence before mapping.
       * If the caller asked not to block, return a NULL map instead while
       * the fence is still pending. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (swr_is_fence_pending(screen->flush_fence))
            return NULL;
      } else {
         if (spr->status) {
            /* But, if there's no fence pending, submit one.
             * XXX: Remove once draw timestamps are finished. */
            if (!swr_is_fence_pending(screen->flush_fence))
               swr_fence_submit(swr_context(pipe), screen->flush_fence);

            swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
            swr_resource_unused(resource);
         }
      }
   }

   pt = CALLOC_STRUCT(pipe_transfer);
   if (!pt)
      return NULL;
   pipe_resource_reference(&pt->resource, resource);
   pt->usage = (pipe_transfer_usage)usage;
   pt->level = level;
   pt->box = *box;
   pt->stride = spr->swr.pitch;
   pt->layer_stride = spr->swr.qpitch * spr->swr.pitch;

   /* If we're mapping a combined depth/stencil resource, copy the stencil
    * values into the interleaved map for the section being read.
    */
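   /* Stencil lives in a separate (secondary) resource. Within the
    * interleaved map, the stencil byte sits at offset 3 of each 4-byte
    * Z24S8 texel and at offset 4 of each 8-byte Z32F_S8X24 texel. */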
   if (usage & PIPE_TRANSFER_READ && spr->has_depth && spr->has_stencil) {
      size_t zbase, sbase;
      for (int z = box->z; z < box->z + box->depth; z++) {
         zbase = (z * spr->swr.qpitch + box->y) * spr->swr.pitch +
                 spr->mip_offsets[level];
         sbase = (z * spr->secondary.qpitch + box->y) * spr->secondary.pitch +
                 spr->secondary_mip_offsets[level];
         for (int y = box->y; y < box->y + box->height; y++) {
            if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
               for (int x = box->x; x < box->x + box->width; x++)
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 4 * x + 3] =
                     ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x];
            } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
               for (int x = box->x; x < box->x + box->width; x++)
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 8 * x + 4] =
                     ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x];
            }
            zbase += spr->swr.pitch;
            sbase += spr->secondary.pitch;
         }
      }
   }

   unsigned offset = box->z * pt->layer_stride +
      util_format_get_nblocksy(format, box->y) * pt->stride +
      util_format_get_stride(format, box->x);

   *transfer = pt;

   return (void*)(spr->swr.xpBaseAddress + offset + spr->mip_offsets[level]);
}

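/* Extract stencil from the interleaved depth/stencil map and write it back
 * to the separate stencil resource for the flushed region. */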
static void
swr_transfer_flush_region(struct pipe_context *pipe,
                          struct pipe_transfer *transfer,
                          const struct pipe_box *flush_box)
{
   assert(transfer->resource);
   assert(transfer->usage & PIPE_TRANSFER_WRITE);

   struct swr_resource *spr = swr_resource(transfer->resource);
   if (!spr->has_depth || !spr->has_stencil)
      return;

   size_t zbase, sbase;
   struct pipe_box box = *flush_box;
   box.x += transfer->box.x;
   box.y += transfer->box.y;
   box.z += transfer->box.z;
   for (int z = box.z; z < box.z + box.depth; z++) {
      zbase = (z * spr->swr.qpitch + box.y) * spr->swr.pitch +
              spr->mip_offsets[transfer->level];
      sbase = (z * spr->secondary.qpitch + box.y) * spr->secondary.pitch +
              spr->secondary_mip_offsets[transfer->level];
      for (int y = box.y; y < box.y + box.height; y++) {
         if (spr->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
            for (int x = box.x; x < box.x + box.width; x++)
               ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x] =
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 4 * x + 3];
         } else if (spr->base.format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
            for (int x = box.x; x < box.x + box.width; x++)
               ((uint8_t*)(spr->secondary.xpBaseAddress))[sbase + x] =
                  ((uint8_t*)(spr->swr.xpBaseAddress))[zbase + 8 * x + 4];
         }
         zbase += spr->swr.pitch;
         sbase += spr->secondary.pitch;
      }
   }
}

static void
swr_transfer_unmap(struct pipe_context *pipe, struct pipe_transfer *transfer)
{
   assert(transfer->resource);

   struct swr_resource *spr = swr_resource(transfer->resource);
   /* If we mapped a combined depth/stencil resource, copy the stencil back
    * out to the separate stencil resource for the section being written.
    */
   if (transfer->usage & PIPE_TRANSFER_WRITE &&
       !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) &&
       spr->has_depth && spr->has_stencil) {
      struct pipe_box box;
      u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height,
               transfer->box.depth, &box);
      swr_transfer_flush_region(pipe, transfer, &box);
   }

   pipe_resource_reference(&transfer->resource, NULL);
   FREE(transfer);
}


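/* Copy a region between resources. Tiles are stored and the flush fence
 * waited on first; only buffer-to-buffer and texture-to-texture copies are
 * handled by the util fallback. */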
static void
swr_resource_copy(struct pipe_context *pipe,
                  struct pipe_resource *dst,
                  unsigned dst_level,
                  unsigned dstx,
                  unsigned dsty,
                  unsigned dstz,
                  struct pipe_resource *src,
                  unsigned src_level,
                  const struct pipe_box *src_box)
{
   struct swr_screen *screen = swr_screen(pipe->screen);

   /* If either the src or dst is a renderTarget, store tiles before copy */
   swr_store_dirty_resource(pipe, src, SWR_TILE_RESOLVED);
   swr_store_dirty_resource(pipe, dst, SWR_TILE_RESOLVED);

   swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0);
   swr_resource_unused(src);
   swr_resource_unused(dst);

   if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER)
       || (dst->target != PIPE_BUFFER && src->target != PIPE_BUFFER)) {
      util_resource_copy_region(
         pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box);
      return;
   }

   debug_printf("unhandled swr_resource_copy\n");
}


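/* Generic blit: resolve MSAA color sources via the surface store, try a
 * plain copy_region first, then fall back to the util blitter with full
 * state save/restore. */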
static void
swr_blit(struct pipe_context *pipe, const struct pipe_blit_info *blit_info)
{
   struct swr_context *ctx = swr_context(pipe);
   /* Make a copy of the const blit_info, so we can modify it */
   struct pipe_blit_info info = *blit_info;

   if (info.render_condition_enable && !swr_check_render_cond(pipe))
      return;

   if (info.src.resource->nr_samples > 1 && info.dst.resource->nr_samples <= 1
       && !util_format_is_depth_or_stencil(info.src.resource->format)
       && !util_format_is_pure_integer(info.src.resource->format)) {
      debug_printf("swr_blit: color resolve : %d -> %d\n",
                   info.src.resource->nr_samples, info.dst.resource->nr_samples);

      /* Resolve is done as part of the surface store. */
      swr_store_dirty_resource(pipe, info.src.resource, SWR_TILE_RESOLVED);

      struct pipe_resource *src_resource = info.src.resource;
      struct pipe_resource *resolve_target =
         swr_resource(src_resource)->resolve_target;

      /* The resolve target becomes the new source for the blit. */
      info.src.resource = resolve_target;
   }

   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("swr: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
      debug_printf("swr: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   if (ctx->active_queries) {
      ctx->api.pfnSwrEnableStatsFE(ctx->swrContext, FALSE);
      ctx->api.pfnSwrEnableStatsBE(ctx->swrContext, FALSE);
   }

   util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vertex_buffer);
   util_blitter_save_vertex_elements(ctx->blitter, (void *)ctx->velems);
   util_blitter_save_vertex_shader(ctx->blitter, (void *)ctx->vs);
   util_blitter_save_geometry_shader(ctx->blitter, (void *)ctx->gs);
   util_blitter_save_tessctrl_shader(ctx->blitter, (void *)ctx->tcs);
   util_blitter_save_tesseval_shader(ctx->blitter, (void *)ctx->tes);
   util_blitter_save_so_targets(
      ctx->blitter,
      ctx->num_so_targets,
      (struct pipe_stream_output_target **)ctx->so_targets);
   util_blitter_save_rasterizer(ctx->blitter, (void *)ctx->rasterizer);
   util_blitter_save_viewport(ctx->blitter, &ctx->viewports[0]);
   util_blitter_save_scissor(ctx->blitter, &ctx->scissors[0]);
   util_blitter_save_fragment_shader(ctx->blitter, ctx->fs);
   util_blitter_save_blend(ctx->blitter, (void *)ctx->blend);
   util_blitter_save_depth_stencil_alpha(ctx->blitter,
                                         (void *)ctx->depth_stencil);
   util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
   util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
   util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
   util_blitter_save_fragment_sampler_states(
      ctx->blitter,
      ctx->num_samplers[PIPE_SHADER_FRAGMENT],
      (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(
      ctx->blitter,
      ctx->num_sampler_views[PIPE_SHADER_FRAGMENT],
      ctx->sampler_views[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_render_condition(ctx->blitter,
                                      ctx->render_cond_query,
                                      ctx->render_cond_cond,
                                      ctx->render_cond_mode);

   util_blitter_blit(ctx->blitter, &info);

   if (ctx->active_queries) {
      ctx->api.pfnSwrEnableStatsFE(ctx->swrContext, TRUE);
      ctx->api.pfnSwrEnableStatsBE(ctx->swrContext, TRUE);
   }
}


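/* Tear down the context: drop framebuffer and sampler-view references, idle
 * the SWR core, then destroy it along with the blend JIT map. */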
static void
swr_destroy(struct pipe_context *pipe)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (ctx->framebuffer.cbufs[i]) {
         struct swr_resource *res = swr_resource(ctx->framebuffer.cbufs[i]->texture);
         /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
         res->curr_pipe = NULL;
         pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
      }
   }

   if (ctx->framebuffer.zsbuf) {
      struct swr_resource *res = swr_resource(ctx->framebuffer.zsbuf->texture);
      /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
      res->curr_pipe = NULL;
      pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_VERTEX][i], NULL);
   }

   if (ctx->pipe.stream_uploader)
      u_upload_destroy(ctx->pipe.stream_uploader);

   /* Idle core after destroying buffer resources, but before deleting
    * context. Destroying resources has potentially called StoreTiles. */
   ctx->api.pfnSwrWaitForIdle(ctx->swrContext);

   if (ctx->swrContext)
      ctx->api.pfnSwrDestroyContext(ctx->swrContext);

   delete ctx->blendJIT;

   swr_destroy_scratch_buffers(ctx);

   /* Only update screen->pipe if current context is being destroyed */
   assert(screen);
   if (screen->pipe == pipe)
      screen->pipe = NULL;

   AlignedFree(ctx);
}


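/* Store the render-condition state; it is consulted later via
 * swr_check_render_cond() by operations that honor conditional rendering,
 * such as blits. */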
static void
swr_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     bool condition,
                     enum pipe_render_cond_flag mode)
{
   struct swr_context *ctx = swr_context(pipe);

   ctx->render_cond_query = query;
   ctx->render_cond_mode = mode;
   ctx->render_cond_cond = condition;
}

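/* SWR core callback: accumulate backend statistics into the query result
 * attached to the draw context. */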
static void
swr_UpdateStats(HANDLE hPrivateContext, const SWR_STATS *pStats)
{
   swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;

   if (!pDC)
      return;

   struct swr_query_result *pqr = pDC->pStats;

   SWR_STATS *pSwrStats = &pqr->core;

   pSwrStats->DepthPassCount += pStats->DepthPassCount;
   pSwrStats->PsInvocations += pStats->PsInvocations;
   pSwrStats->CsInvocations += pStats->CsInvocations;
}

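/* SWR core callback: accumulate frontend statistics. The additions are
 * atomic, as the counters may be updated from multiple worker threads. */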
static void
swr_UpdateStatsFE(HANDLE hPrivateContext, const SWR_STATS_FE *pStats)
{
   swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;

   if (!pDC)
      return;

   struct swr_query_result *pqr = pDC->pStats;

   SWR_STATS_FE *pSwrStats = &pqr->coreFE;
   p_atomic_add(&pSwrStats->IaVertices, pStats->IaVertices);
   p_atomic_add(&pSwrStats->IaPrimitives, pStats->IaPrimitives);
   p_atomic_add(&pSwrStats->VsInvocations, pStats->VsInvocations);
   p_atomic_add(&pSwrStats->HsInvocations, pStats->HsInvocations);
   p_atomic_add(&pSwrStats->DsInvocations, pStats->DsInvocations);
   p_atomic_add(&pSwrStats->GsInvocations, pStats->GsInvocations);
   p_atomic_add(&pSwrStats->CInvocations, pStats->CInvocations);
   p_atomic_add(&pSwrStats->CPrimitives, pStats->CPrimitives);
   p_atomic_add(&pSwrStats->GsPrimitives, pStats->GsPrimitives);

   for (unsigned i = 0; i < 4; i++) {
      p_atomic_add(&pSwrStats->SoPrimStorageNeeded[i],
                   pStats->SoPrimStorageNeeded[i]);
      p_atomic_add(&pSwrStats->SoNumPrimsWritten[i],
                   pStats->SoNumPrimsWritten[i]);
   }
}

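/* SWR core callback: accumulate the number of primitives written by stream
 * output. */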
static void
swr_UpdateStreamOut(HANDLE hPrivateContext, uint64_t numPrims)
{
   swr_draw_context *pDC = (swr_draw_context*)hPrivateContext;

   if (!pDC)
      return;

   if (pDC->soPrims)
      *pDC->soPrims += numPrims;
}

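/* Create the gallium context: allocate it SIMD-aligned, fetch the SWR API
 * and tile function tables from the screen, create the core SWR context and
 * hook up the pipe_context entry points. */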
struct pipe_context *
swr_create_context(struct pipe_screen *p_screen, void *priv, unsigned flags)
{
   struct swr_context *ctx = (struct swr_context *)
      AlignedMalloc(sizeof(struct swr_context), KNOB_SIMD_BYTES);
   if (!ctx)
      return NULL;
   memset((void*)ctx, 0, sizeof(struct swr_context));

   swr_screen(p_screen)->pfnSwrGetInterface(ctx->api);
   swr_screen(p_screen)->pfnSwrGetTileInterface(ctx->tileApi);
   ctx->swrDC.pAPI = &ctx->api;
   ctx->swrDC.pTileAPI = &ctx->tileApi;

   ctx->blendJIT =
      new std::unordered_map<BLEND_COMPILE_STATE, PFN_BLEND_JIT_FUNC>;

   ctx->max_draws_in_flight = KNOB_MAX_DRAWS_IN_FLIGHT;

   SWR_CREATECONTEXT_INFO createInfo {0};

   createInfo.privateStateSize = sizeof(swr_draw_context);
   createInfo.pfnLoadTile = swr_LoadHotTile;
   createInfo.pfnStoreTile = swr_StoreHotTile;
   createInfo.pfnUpdateStats = swr_UpdateStats;
   createInfo.pfnUpdateStatsFE = swr_UpdateStatsFE;
   createInfo.pfnUpdateStreamOut = swr_UpdateStreamOut;
   createInfo.pfnMakeGfxPtr = swr_MakeGfxPtr;

   SWR_THREADING_INFO threadingInfo {0};

   threadingInfo.MAX_WORKER_THREADS = KNOB_MAX_WORKER_THREADS;
   threadingInfo.MAX_NUMA_NODES = KNOB_MAX_NUMA_NODES;
   threadingInfo.MAX_CORES_PER_NUMA_NODE = KNOB_MAX_CORES_PER_NUMA_NODE;
   threadingInfo.MAX_THREADS_PER_CORE = KNOB_MAX_THREADS_PER_CORE;
   threadingInfo.SINGLE_THREADED = KNOB_SINGLE_THREADED;

   // Use non-standard settings for KNL
   if (swr_screen(p_screen)->is_knl)
   {
      if (nullptr == getenv("KNOB_MAX_THREADS_PER_CORE"))
         threadingInfo.MAX_THREADS_PER_CORE = 2;

      if (nullptr == getenv("KNOB_MAX_DRAWS_IN_FLIGHT"))
      {
         ctx->max_draws_in_flight = 2048;
         createInfo.MAX_DRAWS_IN_FLIGHT = ctx->max_draws_in_flight;
      }
   }

   createInfo.pThreadInfo = &threadingInfo;

   ctx->swrContext = ctx->api.pfnSwrCreateContext(&createInfo);

   ctx->api.pfnSwrInit();

   if (ctx->swrContext == NULL)
      goto fail;

   ctx->pipe.screen = p_screen;
   ctx->pipe.destroy = swr_destroy;
   ctx->pipe.priv = priv;
   ctx->pipe.create_surface = swr_create_surface;
   ctx->pipe.surface_destroy = swr_surface_destroy;
   ctx->pipe.transfer_map = swr_transfer_map;
   ctx->pipe.transfer_unmap = swr_transfer_unmap;
   ctx->pipe.transfer_flush_region = swr_transfer_flush_region;

   ctx->pipe.buffer_subdata = u_default_buffer_subdata;
   ctx->pipe.texture_subdata = u_default_texture_subdata;

   ctx->pipe.clear_texture = util_clear_texture;
   ctx->pipe.resource_copy_region = swr_resource_copy;
   ctx->pipe.render_condition = swr_render_condition;

   swr_state_init(&ctx->pipe);
   swr_clear_init(&ctx->pipe);
   swr_draw_init(&ctx->pipe);
   swr_query_init(&ctx->pipe);

   ctx->pipe.stream_uploader = u_upload_create_default(&ctx->pipe);
   if (!ctx->pipe.stream_uploader)
      goto fail;
   ctx->pipe.const_uploader = ctx->pipe.stream_uploader;

   ctx->pipe.blit = swr_blit;
   ctx->blitter = util_blitter_create(&ctx->pipe);
   if (!ctx->blitter)
      goto fail;

   swr_init_scratch_buffers(ctx);

   return &ctx->pipe;

fail:
   /* Should really validate the init steps and fail gracefully */
   swr_destroy(&ctx->pipe);
   return NULL;
}