/****************************************************************************
 * Copyright (C) 2015 Intel Corporation.   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 ***************************************************************************/
24 #include "swr_context.h"
25 #include "swr_memory.h"
26 #include "swr_screen.h"
27 #include "swr_resource.h"
28 #include "swr_scratch.h"
29 #include "swr_query.h"
30 #include "swr_fence.h"
32 #include "util/u_memory.h"
33 #include "util/u_inlines.h"
34 #include "util/format/u_format.h"
35 #include "util/u_atomic.h"
36 #include "util/u_upload_mgr.h"
37 #include "util/u_transfer.h"
38 #include "util/u_surface.h"
44 static struct pipe_surface
*
45 swr_create_surface(struct pipe_context
*pipe
,
46 struct pipe_resource
*pt
,
47 const struct pipe_surface
*surf_tmpl
)
49 struct pipe_surface
*ps
;
51 ps
= CALLOC_STRUCT(pipe_surface
);
53 pipe_reference_init(&ps
->reference
, 1);
54 pipe_resource_reference(&ps
->texture
, pt
);
56 ps
->format
= surf_tmpl
->format
;
57 if (pt
->target
!= PIPE_BUFFER
) {
58 assert(surf_tmpl
->u
.tex
.level
<= pt
->last_level
);
59 ps
->width
= u_minify(pt
->width0
, surf_tmpl
->u
.tex
.level
);
60 ps
->height
= u_minify(pt
->height0
, surf_tmpl
->u
.tex
.level
);
61 ps
->u
.tex
.level
= surf_tmpl
->u
.tex
.level
;
62 ps
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
63 ps
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
65 /* setting width as number of elements should get us correct
66 * renderbuffer width */
67 ps
->width
= surf_tmpl
->u
.buf
.last_element
68 - surf_tmpl
->u
.buf
.first_element
+ 1;
69 ps
->height
= pt
->height0
;
70 ps
->u
.buf
.first_element
= surf_tmpl
->u
.buf
.first_element
;
71 ps
->u
.buf
.last_element
= surf_tmpl
->u
.buf
.last_element
;
72 assert(ps
->u
.buf
.first_element
<= ps
->u
.buf
.last_element
);
73 assert(ps
->u
.buf
.last_element
< ps
->width
);
80 swr_surface_destroy(struct pipe_context
*pipe
, struct pipe_surface
*surf
)
82 assert(surf
->texture
);
83 struct pipe_resource
*resource
= surf
->texture
;
85 /* If the resource has been drawn to, store tiles. */
86 swr_store_dirty_resource(pipe
, resource
, SWR_TILE_RESOLVED
);
88 pipe_resource_reference(&resource
, NULL
);
94 swr_transfer_map(struct pipe_context
*pipe
,
95 struct pipe_resource
*resource
,
98 const struct pipe_box
*box
,
99 struct pipe_transfer
**transfer
)
101 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
102 struct swr_resource
*spr
= swr_resource(resource
);
103 struct pipe_transfer
*pt
;
104 enum pipe_format format
= resource
->format
;
107 assert(level
<= resource
->last_level
);
109 /* If mapping an attached rendertarget, store tiles to surface and set
110 * postStoreTileState to SWR_TILE_INVALID so tiles get reloaded on next use
111 * and nothing needs to be done at unmap. */
112 swr_store_dirty_resource(pipe
, resource
, SWR_TILE_INVALID
);
114 if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
115 /* If resource is in use, finish fence before mapping.
116 * Unless requested not to block, then if not done return NULL map */
117 if (usage
& PIPE_TRANSFER_DONTBLOCK
) {
118 if (swr_is_fence_pending(screen
->flush_fence
))
122 /* But, if there's no fence pending, submit one.
123 * XXX: Remove once draw timestamps are finished. */
124 if (!swr_is_fence_pending(screen
->flush_fence
))
125 swr_fence_submit(swr_context(pipe
), screen
->flush_fence
);
127 swr_fence_finish(pipe
->screen
, NULL
, screen
->flush_fence
, 0);
128 swr_resource_unused(resource
);
133 pt
= CALLOC_STRUCT(pipe_transfer
);
136 pipe_resource_reference(&pt
->resource
, resource
);
137 pt
->usage
= (pipe_transfer_usage
)usage
;
140 pt
->stride
= spr
->swr
.pitch
;
141 pt
->layer_stride
= spr
->swr
.qpitch
* spr
->swr
.pitch
;
143 /* if we're mapping the depth/stencil, copy in stencil for the section
146 if (usage
& PIPE_TRANSFER_READ
&& spr
->has_depth
&& spr
->has_stencil
) {
148 for (int z
= box
->z
; z
< box
->z
+ box
->depth
; z
++) {
149 zbase
= (z
* spr
->swr
.qpitch
+ box
->y
) * spr
->swr
.pitch
+
150 spr
->mip_offsets
[level
];
151 sbase
= (z
* spr
->secondary
.qpitch
+ box
->y
) * spr
->secondary
.pitch
+
152 spr
->secondary_mip_offsets
[level
];
153 for (int y
= box
->y
; y
< box
->y
+ box
->height
; y
++) {
154 if (spr
->base
.format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
155 for (int x
= box
->x
; x
< box
->x
+ box
->width
; x
++)
156 ((uint8_t*)(spr
->swr
.xpBaseAddress
))[zbase
+ 4 * x
+ 3] =
157 ((uint8_t*)(spr
->secondary
.xpBaseAddress
))[sbase
+ x
];
158 } else if (spr
->base
.format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
159 for (int x
= box
->x
; x
< box
->x
+ box
->width
; x
++)
160 ((uint8_t*)(spr
->swr
.xpBaseAddress
))[zbase
+ 8 * x
+ 4] =
161 ((uint8_t*)(spr
->secondary
.xpBaseAddress
))[sbase
+ x
];
163 zbase
+= spr
->swr
.pitch
;
164 sbase
+= spr
->secondary
.pitch
;
169 unsigned offset
= box
->z
* pt
->layer_stride
+
170 util_format_get_nblocksy(format
, box
->y
) * pt
->stride
+
171 util_format_get_stride(format
, box
->x
);
175 return (void*)(spr
->swr
.xpBaseAddress
+ offset
+ spr
->mip_offsets
[level
]);
179 swr_transfer_flush_region(struct pipe_context
*pipe
,
180 struct pipe_transfer
*transfer
,
181 const struct pipe_box
*flush_box
)
183 assert(transfer
->resource
);
184 assert(transfer
->usage
& PIPE_TRANSFER_WRITE
);
186 struct swr_resource
*spr
= swr_resource(transfer
->resource
);
187 if (!spr
->has_depth
|| !spr
->has_stencil
)
191 struct pipe_box box
= *flush_box
;
192 box
.x
+= transfer
->box
.x
;
193 box
.y
+= transfer
->box
.y
;
194 box
.z
+= transfer
->box
.z
;
195 for (int z
= box
.z
; z
< box
.z
+ box
.depth
; z
++) {
196 zbase
= (z
* spr
->swr
.qpitch
+ box
.y
) * spr
->swr
.pitch
+
197 spr
->mip_offsets
[transfer
->level
];
198 sbase
= (z
* spr
->secondary
.qpitch
+ box
.y
) * spr
->secondary
.pitch
+
199 spr
->secondary_mip_offsets
[transfer
->level
];
200 for (int y
= box
.y
; y
< box
.y
+ box
.height
; y
++) {
201 if (spr
->base
.format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
202 for (int x
= box
.x
; x
< box
.x
+ box
.width
; x
++)
203 ((uint8_t*)(spr
->secondary
.xpBaseAddress
))[sbase
+ x
] =
204 ((uint8_t*)(spr
->swr
.xpBaseAddress
))[zbase
+ 4 * x
+ 3];
205 } else if (spr
->base
.format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
206 for (int x
= box
.x
; x
< box
.x
+ box
.width
; x
++)
207 ((uint8_t*)(spr
->secondary
.xpBaseAddress
))[sbase
+ x
] =
208 ((uint8_t*)(spr
->swr
.xpBaseAddress
))[zbase
+ 8 * x
+ 4];
210 zbase
+= spr
->swr
.pitch
;
211 sbase
+= spr
->secondary
.pitch
;
217 swr_transfer_unmap(struct pipe_context
*pipe
, struct pipe_transfer
*transfer
)
219 assert(transfer
->resource
);
221 struct swr_resource
*spr
= swr_resource(transfer
->resource
);
222 /* if we're mapping the depth/stencil, copy in stencil for the section
225 if (transfer
->usage
& PIPE_TRANSFER_WRITE
&&
226 !(transfer
->usage
& PIPE_TRANSFER_FLUSH_EXPLICIT
) &&
227 spr
->has_depth
&& spr
->has_stencil
) {
229 u_box_3d(0, 0, 0, transfer
->box
.width
, transfer
->box
.height
,
230 transfer
->box
.depth
, &box
);
231 swr_transfer_flush_region(pipe
, transfer
, &box
);
234 pipe_resource_reference(&transfer
->resource
, NULL
);
240 swr_resource_copy(struct pipe_context
*pipe
,
241 struct pipe_resource
*dst
,
246 struct pipe_resource
*src
,
248 const struct pipe_box
*src_box
)
250 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
252 /* If either the src or dst is a renderTarget, store tiles before copy */
253 swr_store_dirty_resource(pipe
, src
, SWR_TILE_RESOLVED
);
254 swr_store_dirty_resource(pipe
, dst
, SWR_TILE_RESOLVED
);
256 swr_fence_finish(pipe
->screen
, NULL
, screen
->flush_fence
, 0);
257 swr_resource_unused(src
);
258 swr_resource_unused(dst
);
260 if ((dst
->target
== PIPE_BUFFER
&& src
->target
== PIPE_BUFFER
)
261 || (dst
->target
!= PIPE_BUFFER
&& src
->target
!= PIPE_BUFFER
)) {
262 util_resource_copy_region(
263 pipe
, dst
, dst_level
, dstx
, dsty
, dstz
, src
, src_level
, src_box
);
267 debug_printf("unhandled swr_resource_copy\n");
272 swr_blit(struct pipe_context
*pipe
, const struct pipe_blit_info
*blit_info
)
274 struct swr_context
*ctx
= swr_context(pipe
);
275 /* Make a copy of the const blit_info, so we can modify it */
276 struct pipe_blit_info info
= *blit_info
;
278 if (info
.render_condition_enable
&& !swr_check_render_cond(pipe
))
281 if (info
.src
.resource
->nr_samples
> 1 && info
.dst
.resource
->nr_samples
<= 1
282 && !util_format_is_depth_or_stencil(info
.src
.resource
->format
)
283 && !util_format_is_pure_integer(info
.src
.resource
->format
)) {
284 debug_printf("swr_blit: color resolve : %d -> %d\n",
285 info
.src
.resource
->nr_samples
, info
.dst
.resource
->nr_samples
);
287 /* Resolve is done as part of the surface store. */
288 swr_store_dirty_resource(pipe
, info
.src
.resource
, SWR_TILE_RESOLVED
);
290 struct pipe_resource
*src_resource
= info
.src
.resource
;
291 struct pipe_resource
*resolve_target
=
292 swr_resource(src_resource
)->resolve_target
;
294 /* The resolve target becomes the new source for the blit. */
295 info
.src
.resource
= resolve_target
;
298 if (util_try_blit_via_copy_region(pipe
, &info
)) {
302 if (info
.mask
& PIPE_MASK_S
) {
303 debug_printf("swr: cannot blit stencil, skipping\n");
304 info
.mask
&= ~PIPE_MASK_S
;
307 if (!util_blitter_is_blit_supported(ctx
->blitter
, &info
)) {
308 debug_printf("swr: blit unsupported %s -> %s\n",
309 util_format_short_name(info
.src
.resource
->format
),
310 util_format_short_name(info
.dst
.resource
->format
));
314 if (ctx
->active_queries
) {
315 ctx
->api
.pfnSwrEnableStatsFE(ctx
->swrContext
, FALSE
);
316 ctx
->api
.pfnSwrEnableStatsBE(ctx
->swrContext
, FALSE
);
319 util_blitter_save_vertex_buffer_slot(ctx
->blitter
, ctx
->vertex_buffer
);
320 util_blitter_save_vertex_elements(ctx
->blitter
, (void *)ctx
->velems
);
321 util_blitter_save_vertex_shader(ctx
->blitter
, (void *)ctx
->vs
);
322 util_blitter_save_geometry_shader(ctx
->blitter
, (void*)ctx
->gs
);
323 util_blitter_save_tessctrl_shader(ctx
->blitter
, (void*)ctx
->tcs
);
324 util_blitter_save_tesseval_shader(ctx
->blitter
, (void*)ctx
->tes
);
325 util_blitter_save_so_targets(
328 (struct pipe_stream_output_target
**)ctx
->so_targets
);
329 util_blitter_save_rasterizer(ctx
->blitter
, (void *)ctx
->rasterizer
);
330 util_blitter_save_viewport(ctx
->blitter
, &ctx
->viewports
[0]);
331 util_blitter_save_scissor(ctx
->blitter
, &ctx
->scissors
[0]);
332 util_blitter_save_fragment_shader(ctx
->blitter
, ctx
->fs
);
333 util_blitter_save_blend(ctx
->blitter
, (void *)ctx
->blend
);
334 util_blitter_save_depth_stencil_alpha(ctx
->blitter
,
335 (void *)ctx
->depth_stencil
);
336 util_blitter_save_stencil_ref(ctx
->blitter
, &ctx
->stencil_ref
);
337 util_blitter_save_sample_mask(ctx
->blitter
, ctx
->sample_mask
);
338 util_blitter_save_framebuffer(ctx
->blitter
, &ctx
->framebuffer
);
339 util_blitter_save_fragment_sampler_states(
341 ctx
->num_samplers
[PIPE_SHADER_FRAGMENT
],
342 (void **)ctx
->samplers
[PIPE_SHADER_FRAGMENT
]);
343 util_blitter_save_fragment_sampler_views(
345 ctx
->num_sampler_views
[PIPE_SHADER_FRAGMENT
],
346 ctx
->sampler_views
[PIPE_SHADER_FRAGMENT
]);
347 util_blitter_save_render_condition(ctx
->blitter
,
348 ctx
->render_cond_query
,
349 ctx
->render_cond_cond
,
350 ctx
->render_cond_mode
);
352 util_blitter_blit(ctx
->blitter
, &info
);
354 if (ctx
->active_queries
) {
355 ctx
->api
.pfnSwrEnableStatsFE(ctx
->swrContext
, TRUE
);
356 ctx
->api
.pfnSwrEnableStatsBE(ctx
->swrContext
, TRUE
);
362 swr_destroy(struct pipe_context
*pipe
)
364 struct swr_context
*ctx
= swr_context(pipe
);
365 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
368 util_blitter_destroy(ctx
->blitter
);
370 for (unsigned i
= 0; i
< PIPE_MAX_COLOR_BUFS
; i
++) {
371 if (ctx
->framebuffer
.cbufs
[i
]) {
372 struct swr_resource
*res
= swr_resource(ctx
->framebuffer
.cbufs
[i
]->texture
);
373 /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
374 res
->curr_pipe
= NULL
;
375 pipe_surface_reference(&ctx
->framebuffer
.cbufs
[i
], NULL
);
379 if (ctx
->framebuffer
.zsbuf
) {
380 struct swr_resource
*res
= swr_resource(ctx
->framebuffer
.zsbuf
->texture
);
381 /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
382 res
->curr_pipe
= NULL
;
383 pipe_surface_reference(&ctx
->framebuffer
.zsbuf
, NULL
);
386 for (unsigned i
= 0; i
< ARRAY_SIZE(ctx
->sampler_views
[0]); i
++) {
387 pipe_sampler_view_reference(&ctx
->sampler_views
[PIPE_SHADER_FRAGMENT
][i
], NULL
);
390 for (unsigned i
= 0; i
< ARRAY_SIZE(ctx
->sampler_views
[0]); i
++) {
391 pipe_sampler_view_reference(&ctx
->sampler_views
[PIPE_SHADER_VERTEX
][i
], NULL
);
394 if (ctx
->pipe
.stream_uploader
)
395 u_upload_destroy(ctx
->pipe
.stream_uploader
);
397 /* Idle core after destroying buffer resources, but before deleting
398 * context. Destroying resources has potentially called StoreTiles.*/
399 ctx
->api
.pfnSwrWaitForIdle(ctx
->swrContext
);
402 ctx
->api
.pfnSwrDestroyContext(ctx
->swrContext
);
404 delete ctx
->blendJIT
;
406 swr_destroy_scratch_buffers(ctx
);
409 /* Only update screen->pipe if current context is being destroyed */
411 if (screen
->pipe
== pipe
)
419 swr_render_condition(struct pipe_context
*pipe
,
420 struct pipe_query
*query
,
422 enum pipe_render_cond_flag mode
)
424 struct swr_context
*ctx
= swr_context(pipe
);
426 ctx
->render_cond_query
= query
;
427 ctx
->render_cond_mode
= mode
;
428 ctx
->render_cond_cond
= condition
;
432 swr_UpdateStats(HANDLE hPrivateContext
, const SWR_STATS
*pStats
)
434 swr_draw_context
*pDC
= (swr_draw_context
*)hPrivateContext
;
439 struct swr_query_result
*pqr
= pDC
->pStats
;
441 SWR_STATS
*pSwrStats
= &pqr
->core
;
443 pSwrStats
->DepthPassCount
+= pStats
->DepthPassCount
;
444 pSwrStats
->PsInvocations
+= pStats
->PsInvocations
;
445 pSwrStats
->CsInvocations
+= pStats
->CsInvocations
;
449 swr_UpdateStatsFE(HANDLE hPrivateContext
, const SWR_STATS_FE
*pStats
)
451 swr_draw_context
*pDC
= (swr_draw_context
*)hPrivateContext
;
456 struct swr_query_result
*pqr
= pDC
->pStats
;
458 SWR_STATS_FE
*pSwrStats
= &pqr
->coreFE
;
459 p_atomic_add(&pSwrStats
->IaVertices
, pStats
->IaVertices
);
460 p_atomic_add(&pSwrStats
->IaPrimitives
, pStats
->IaPrimitives
);
461 p_atomic_add(&pSwrStats
->VsInvocations
, pStats
->VsInvocations
);
462 p_atomic_add(&pSwrStats
->HsInvocations
, pStats
->HsInvocations
);
463 p_atomic_add(&pSwrStats
->DsInvocations
, pStats
->DsInvocations
);
464 p_atomic_add(&pSwrStats
->GsInvocations
, pStats
->GsInvocations
);
465 p_atomic_add(&pSwrStats
->CInvocations
, pStats
->CInvocations
);
466 p_atomic_add(&pSwrStats
->CPrimitives
, pStats
->CPrimitives
);
467 p_atomic_add(&pSwrStats
->GsPrimitives
, pStats
->GsPrimitives
);
469 for (unsigned i
= 0; i
< 4; i
++) {
470 p_atomic_add(&pSwrStats
->SoPrimStorageNeeded
[i
],
471 pStats
->SoPrimStorageNeeded
[i
]);
472 p_atomic_add(&pSwrStats
->SoNumPrimsWritten
[i
],
473 pStats
->SoNumPrimsWritten
[i
]);
478 swr_UpdateStreamOut(HANDLE hPrivateContext
, uint64_t numPrims
)
480 swr_draw_context
*pDC
= (swr_draw_context
*)hPrivateContext
;
486 *pDC
->soPrims
+= numPrims
;
489 struct pipe_context
*
490 swr_create_context(struct pipe_screen
*p_screen
, void *priv
, unsigned flags
)
492 struct swr_context
*ctx
= (struct swr_context
*)
493 AlignedMalloc(sizeof(struct swr_context
), KNOB_SIMD_BYTES
);
494 memset((void*)ctx
, 0, sizeof(struct swr_context
));
496 swr_screen(p_screen
)->pfnSwrGetInterface(ctx
->api
);
497 swr_screen(p_screen
)->pfnSwrGetTileInterface(ctx
->tileApi
);
498 ctx
->swrDC
.pAPI
= &ctx
->api
;
499 ctx
->swrDC
.pTileAPI
= &ctx
->tileApi
;
502 new std::unordered_map
<BLEND_COMPILE_STATE
, PFN_BLEND_JIT_FUNC
>;
504 ctx
->max_draws_in_flight
= KNOB_MAX_DRAWS_IN_FLIGHT
;
506 SWR_CREATECONTEXT_INFO createInfo
{0};
508 createInfo
.privateStateSize
= sizeof(swr_draw_context
);
509 createInfo
.pfnLoadTile
= swr_LoadHotTile
;
510 createInfo
.pfnStoreTile
= swr_StoreHotTile
;
511 createInfo
.pfnUpdateStats
= swr_UpdateStats
;
512 createInfo
.pfnUpdateStatsFE
= swr_UpdateStatsFE
;
513 createInfo
.pfnUpdateStreamOut
= swr_UpdateStreamOut
;
514 createInfo
.pfnMakeGfxPtr
= swr_MakeGfxPtr
;
516 SWR_THREADING_INFO threadingInfo
{0};
518 threadingInfo
.MAX_WORKER_THREADS
= KNOB_MAX_WORKER_THREADS
;
519 threadingInfo
.MAX_NUMA_NODES
= KNOB_MAX_NUMA_NODES
;
520 threadingInfo
.MAX_CORES_PER_NUMA_NODE
= KNOB_MAX_CORES_PER_NUMA_NODE
;
521 threadingInfo
.MAX_THREADS_PER_CORE
= KNOB_MAX_THREADS_PER_CORE
;
522 threadingInfo
.SINGLE_THREADED
= KNOB_SINGLE_THREADED
;
524 // Use non-standard settings for KNL
525 if (swr_screen(p_screen
)->is_knl
)
527 if (nullptr == getenv("KNOB_MAX_THREADS_PER_CORE"))
528 threadingInfo
.MAX_THREADS_PER_CORE
= 2;
530 if (nullptr == getenv("KNOB_MAX_DRAWS_IN_FLIGHT"))
532 ctx
->max_draws_in_flight
= 2048;
533 createInfo
.MAX_DRAWS_IN_FLIGHT
= ctx
->max_draws_in_flight
;
537 createInfo
.pThreadInfo
= &threadingInfo
;
539 ctx
->swrContext
= ctx
->api
.pfnSwrCreateContext(&createInfo
);
541 ctx
->api
.pfnSwrInit();
543 if (ctx
->swrContext
== NULL
)
546 ctx
->pipe
.screen
= p_screen
;
547 ctx
->pipe
.destroy
= swr_destroy
;
548 ctx
->pipe
.priv
= priv
;
549 ctx
->pipe
.create_surface
= swr_create_surface
;
550 ctx
->pipe
.surface_destroy
= swr_surface_destroy
;
551 ctx
->pipe
.transfer_map
= swr_transfer_map
;
552 ctx
->pipe
.transfer_unmap
= swr_transfer_unmap
;
553 ctx
->pipe
.transfer_flush_region
= swr_transfer_flush_region
;
555 ctx
->pipe
.buffer_subdata
= u_default_buffer_subdata
;
556 ctx
->pipe
.texture_subdata
= u_default_texture_subdata
;
558 ctx
->pipe
.clear_texture
= util_clear_texture
;
559 ctx
->pipe
.resource_copy_region
= swr_resource_copy
;
560 ctx
->pipe
.render_condition
= swr_render_condition
;
562 swr_state_init(&ctx
->pipe
);
563 swr_clear_init(&ctx
->pipe
);
564 swr_draw_init(&ctx
->pipe
);
565 swr_query_init(&ctx
->pipe
);
567 ctx
->pipe
.stream_uploader
= u_upload_create_default(&ctx
->pipe
);
568 if (!ctx
->pipe
.stream_uploader
)
570 ctx
->pipe
.const_uploader
= ctx
->pipe
.stream_uploader
;
572 ctx
->pipe
.blit
= swr_blit
;
573 ctx
->blitter
= util_blitter_create(&ctx
->pipe
);
577 swr_init_scratch_buffers(ctx
);
582 /* Should really validate the init steps and fail gracefully */
583 swr_destroy(&ctx
->pipe
);