1 /****************************************************************************
2 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 ***************************************************************************/
24 #include "swr_context.h"
25 #include "swr_memory.h"
26 #include "swr_screen.h"
27 #include "swr_resource.h"
28 #include "swr_scratch.h"
29 #include "swr_query.h"
30 #include "swr_fence.h"
32 #include "util/u_memory.h"
33 #include "util/u_inlines.h"
34 #include "util/u_format.h"
35 #include "util/u_atomic.h"
36 #include "util/u_upload_mgr.h"
37 #include "util/u_transfer.h"
38 #include "util/u_surface.h"
43 static struct pipe_surface
*
44 swr_create_surface(struct pipe_context
*pipe
,
45 struct pipe_resource
*pt
,
46 const struct pipe_surface
*surf_tmpl
)
48 struct pipe_surface
*ps
;
50 ps
= CALLOC_STRUCT(pipe_surface
);
52 pipe_reference_init(&ps
->reference
, 1);
53 pipe_resource_reference(&ps
->texture
, pt
);
55 ps
->format
= surf_tmpl
->format
;
56 if (pt
->target
!= PIPE_BUFFER
) {
57 assert(surf_tmpl
->u
.tex
.level
<= pt
->last_level
);
58 ps
->width
= u_minify(pt
->width0
, surf_tmpl
->u
.tex
.level
);
59 ps
->height
= u_minify(pt
->height0
, surf_tmpl
->u
.tex
.level
);
60 ps
->u
.tex
.level
= surf_tmpl
->u
.tex
.level
;
61 ps
->u
.tex
.first_layer
= surf_tmpl
->u
.tex
.first_layer
;
62 ps
->u
.tex
.last_layer
= surf_tmpl
->u
.tex
.last_layer
;
64 /* setting width as number of elements should get us correct
65 * renderbuffer width */
66 ps
->width
= surf_tmpl
->u
.buf
.last_element
67 - surf_tmpl
->u
.buf
.first_element
+ 1;
68 ps
->height
= pt
->height0
;
69 ps
->u
.buf
.first_element
= surf_tmpl
->u
.buf
.first_element
;
70 ps
->u
.buf
.last_element
= surf_tmpl
->u
.buf
.last_element
;
71 assert(ps
->u
.buf
.first_element
<= ps
->u
.buf
.last_element
);
72 assert(ps
->u
.buf
.last_element
< ps
->width
);
79 swr_surface_destroy(struct pipe_context
*pipe
, struct pipe_surface
*surf
)
81 assert(surf
->texture
);
82 struct pipe_resource
*resource
= surf
->texture
;
84 /* If the resource has been drawn to, store tiles. */
85 swr_store_dirty_resource(pipe
, resource
, SWR_TILE_RESOLVED
);
87 pipe_resource_reference(&resource
, NULL
);
93 swr_transfer_map(struct pipe_context
*pipe
,
94 struct pipe_resource
*resource
,
97 const struct pipe_box
*box
,
98 struct pipe_transfer
**transfer
)
100 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
101 struct swr_resource
*spr
= swr_resource(resource
);
102 struct pipe_transfer
*pt
;
103 enum pipe_format format
= resource
->format
;
106 assert(level
<= resource
->last_level
);
108 /* If mapping an attached rendertarget, store tiles to surface and set
109 * postStoreTileState to SWR_TILE_INVALID so tiles get reloaded on next use
110 * and nothing needs to be done at unmap. */
111 swr_store_dirty_resource(pipe
, resource
, SWR_TILE_INVALID
);
113 if (!(usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)) {
114 /* If resource is in use, finish fence before mapping.
115 * Unless requested not to block, then if not done return NULL map */
116 if (usage
& PIPE_TRANSFER_DONTBLOCK
) {
117 if (swr_is_fence_pending(screen
->flush_fence
))
121 /* But, if there's no fence pending, submit one.
122 * XXX: Remove once draw timestamps are finished. */
123 if (!swr_is_fence_pending(screen
->flush_fence
))
124 swr_fence_submit(swr_context(pipe
), screen
->flush_fence
);
126 swr_fence_finish(pipe
->screen
, NULL
, screen
->flush_fence
, 0);
127 swr_resource_unused(resource
);
132 pt
= CALLOC_STRUCT(pipe_transfer
);
135 pipe_resource_reference(&pt
->resource
, resource
);
136 pt
->usage
= (pipe_transfer_usage
)usage
;
139 pt
->stride
= spr
->swr
.pitch
;
140 pt
->layer_stride
= spr
->swr
.qpitch
* spr
->swr
.pitch
;
142 /* if we're mapping the depth/stencil, copy in stencil for the section
145 if (usage
& PIPE_TRANSFER_READ
&& spr
->has_depth
&& spr
->has_stencil
) {
147 for (int z
= box
->z
; z
< box
->z
+ box
->depth
; z
++) {
148 zbase
= (z
* spr
->swr
.qpitch
+ box
->y
) * spr
->swr
.pitch
+
149 spr
->mip_offsets
[level
];
150 sbase
= (z
* spr
->secondary
.qpitch
+ box
->y
) * spr
->secondary
.pitch
+
151 spr
->secondary_mip_offsets
[level
];
152 for (int y
= box
->y
; y
< box
->y
+ box
->height
; y
++) {
153 if (spr
->base
.format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
154 for (int x
= box
->x
; x
< box
->x
+ box
->width
; x
++)
155 spr
->swr
.pBaseAddress
[zbase
+ 4 * x
+ 3] =
156 spr
->secondary
.pBaseAddress
[sbase
+ x
];
157 } else if (spr
->base
.format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
158 for (int x
= box
->x
; x
< box
->x
+ box
->width
; x
++)
159 spr
->swr
.pBaseAddress
[zbase
+ 8 * x
+ 4] =
160 spr
->secondary
.pBaseAddress
[sbase
+ x
];
162 zbase
+= spr
->swr
.pitch
;
163 sbase
+= spr
->secondary
.pitch
;
168 unsigned offset
= box
->z
* pt
->layer_stride
+
169 util_format_get_nblocksy(format
, box
->y
) * pt
->stride
+
170 util_format_get_stride(format
, box
->x
);
174 return spr
->swr
.pBaseAddress
+ offset
+ spr
->mip_offsets
[level
];
178 swr_transfer_flush_region(struct pipe_context
*pipe
,
179 struct pipe_transfer
*transfer
,
180 const struct pipe_box
*flush_box
)
182 assert(transfer
->resource
);
183 assert(transfer
->usage
& PIPE_TRANSFER_WRITE
);
185 struct swr_resource
*spr
= swr_resource(transfer
->resource
);
186 if (!spr
->has_depth
|| !spr
->has_stencil
)
190 struct pipe_box box
= *flush_box
;
191 box
.x
+= transfer
->box
.x
;
192 box
.y
+= transfer
->box
.y
;
193 box
.z
+= transfer
->box
.z
;
194 for (int z
= box
.z
; z
< box
.z
+ box
.depth
; z
++) {
195 zbase
= (z
* spr
->swr
.qpitch
+ box
.y
) * spr
->swr
.pitch
+
196 spr
->mip_offsets
[transfer
->level
];
197 sbase
= (z
* spr
->secondary
.qpitch
+ box
.y
) * spr
->secondary
.pitch
+
198 spr
->secondary_mip_offsets
[transfer
->level
];
199 for (int y
= box
.y
; y
< box
.y
+ box
.height
; y
++) {
200 if (spr
->base
.format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
201 for (int x
= box
.x
; x
< box
.x
+ box
.width
; x
++)
202 spr
->secondary
.pBaseAddress
[sbase
+ x
] =
203 spr
->swr
.pBaseAddress
[zbase
+ 4 * x
+ 3];
204 } else if (spr
->base
.format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
205 for (int x
= box
.x
; x
< box
.x
+ box
.width
; x
++)
206 spr
->secondary
.pBaseAddress
[sbase
+ x
] =
207 spr
->swr
.pBaseAddress
[zbase
+ 8 * x
+ 4];
209 zbase
+= spr
->swr
.pitch
;
210 sbase
+= spr
->secondary
.pitch
;
216 swr_transfer_unmap(struct pipe_context
*pipe
, struct pipe_transfer
*transfer
)
218 assert(transfer
->resource
);
220 struct swr_resource
*spr
= swr_resource(transfer
->resource
);
221 /* if we're mapping the depth/stencil, copy in stencil for the section
224 if (transfer
->usage
& PIPE_TRANSFER_WRITE
&&
225 !(transfer
->usage
& PIPE_TRANSFER_FLUSH_EXPLICIT
) &&
226 spr
->has_depth
&& spr
->has_stencil
) {
228 u_box_3d(0, 0, 0, transfer
->box
.width
, transfer
->box
.height
,
229 transfer
->box
.depth
, &box
);
230 swr_transfer_flush_region(pipe
, transfer
, &box
);
233 pipe_resource_reference(&transfer
->resource
, NULL
);
239 swr_resource_copy(struct pipe_context
*pipe
,
240 struct pipe_resource
*dst
,
245 struct pipe_resource
*src
,
247 const struct pipe_box
*src_box
)
249 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
251 /* If either the src or dst is a renderTarget, store tiles before copy */
252 swr_store_dirty_resource(pipe
, src
, SWR_TILE_RESOLVED
);
253 swr_store_dirty_resource(pipe
, dst
, SWR_TILE_RESOLVED
);
255 swr_fence_finish(pipe
->screen
, NULL
, screen
->flush_fence
, 0);
256 swr_resource_unused(src
);
257 swr_resource_unused(dst
);
259 if ((dst
->target
== PIPE_BUFFER
&& src
->target
== PIPE_BUFFER
)
260 || (dst
->target
!= PIPE_BUFFER
&& src
->target
!= PIPE_BUFFER
)) {
261 util_resource_copy_region(
262 pipe
, dst
, dst_level
, dstx
, dsty
, dstz
, src
, src_level
, src_box
);
266 debug_printf("unhandled swr_resource_copy\n");
270 /* XXX: This resolve is incomplete and suboptimal. It will be removed once the
271 * pipelined resolve blit works. */
273 swr_do_msaa_resolve(struct pipe_resource
*src_resource
,
274 struct pipe_resource
*dst_resource
)
276 /* This is a pretty dumb inline resolve. It only supports 8-bit formats
277 * (ex RGBA8/BGRA8) - which are most common display formats anyway.
280 /* quick check for 8-bit and number of components */
281 uint8_t bits_per_component
=
282 util_format_get_component_bits(src_resource
->format
,
283 UTIL_FORMAT_COLORSPACE_RGB
, 0);
285 /* Unsupported resolve format */
286 assert(src_resource
->format
== dst_resource
->format
);
287 assert(bits_per_component
== 8);
288 if ((src_resource
->format
!= dst_resource
->format
) ||
289 (bits_per_component
!= 8)) {
293 uint8_t src_num_comps
= util_format_get_nr_components(src_resource
->format
);
295 SWR_SURFACE_STATE
*src_surface
= &swr_resource(src_resource
)->swr
;
296 SWR_SURFACE_STATE
*dst_surface
= &swr_resource(dst_resource
)->swr
;
298 uint32_t *src
, *dst
, offset
;
299 uint32_t num_samples
= src_surface
->numSamples
;
300 float recip_num_samples
= 1.0f
/ num_samples
;
301 for (uint32_t y
= 0; y
< src_surface
->height
; y
++) {
302 for (uint32_t x
= 0; x
< src_surface
->width
; x
++) {
307 for (uint32_t sampleNum
= 0; sampleNum
< num_samples
; sampleNum
++) {
308 offset
= ComputeSurfaceOffset
<false>(x
, y
, 0, 0, sampleNum
, 0, src_surface
);
309 src
= (uint32_t *) src_surface
->pBaseAddress
+ offset
/src_num_comps
;
310 const uint32_t sample
= *src
;
311 r
+= (float)((sample
>> 24) & 0xff) / 255.0f
* recip_num_samples
;
312 g
+= (float)((sample
>> 16) & 0xff) / 255.0f
* recip_num_samples
;
313 b
+= (float)((sample
>> 8) & 0xff) / 255.0f
* recip_num_samples
;
314 a
+= (float)((sample
) & 0xff) / 255.0f
* recip_num_samples
;
317 result
= ((uint8_t)(r
* 255.0f
) & 0xff) << 24;
318 result
|= ((uint8_t)(g
* 255.0f
) & 0xff) << 16;
319 result
|= ((uint8_t)(b
* 255.0f
) & 0xff) << 8;
320 result
|= ((uint8_t)(a
* 255.0f
) & 0xff);
321 offset
= ComputeSurfaceOffset
<false>(x
, y
, 0, 0, 0, 0, src_surface
);
322 dst
= (uint32_t *) dst_surface
->pBaseAddress
+ offset
/src_num_comps
;
330 swr_blit(struct pipe_context
*pipe
, const struct pipe_blit_info
*blit_info
)
332 struct swr_context
*ctx
= swr_context(pipe
);
333 /* Make a copy of the const blit_info, so we can modify it */
334 struct pipe_blit_info info
= *blit_info
;
336 if (info
.render_condition_enable
&& !swr_check_render_cond(pipe
))
339 if (info
.src
.resource
->nr_samples
> 1 && info
.dst
.resource
->nr_samples
<= 1
340 && !util_format_is_depth_or_stencil(info
.src
.resource
->format
)
341 && !util_format_is_pure_integer(info
.src
.resource
->format
)) {
342 debug_printf("swr_blit: color resolve : %d -> %d\n",
343 info
.src
.resource
->nr_samples
, info
.dst
.resource
->nr_samples
);
345 /* Because the resolve is being done inline (not pipelined),
346 * resources need to be stored out of hottiles and the pipeline empty.
348 * Resources are marked unused following fence finish because all
349 * pipeline operations are complete. Validation of the blit will mark
350 * them are read/write again.
352 swr_store_dirty_resource(pipe
, info
.src
.resource
, SWR_TILE_RESOLVED
);
353 swr_store_dirty_resource(pipe
, info
.dst
.resource
, SWR_TILE_RESOLVED
);
354 swr_fence_finish(pipe
->screen
, NULL
, swr_screen(pipe
->screen
)->flush_fence
, 0);
355 swr_resource_unused(info
.src
.resource
);
356 swr_resource_unused(info
.dst
.resource
);
358 struct pipe_resource
*src_resource
= info
.src
.resource
;
359 struct pipe_resource
*resolve_target
=
360 swr_resource(src_resource
)->resolve_target
;
362 /* Inline resolve samples into resolve target resource, then continue
364 swr_do_msaa_resolve(src_resource
, resolve_target
);
366 /* The resolve target becomes the new source for the blit. */
367 info
.src
.resource
= resolve_target
;
370 if (util_try_blit_via_copy_region(pipe
, &info
)) {
374 if (info
.mask
& PIPE_MASK_S
) {
375 debug_printf("swr: cannot blit stencil, skipping\n");
376 info
.mask
&= ~PIPE_MASK_S
;
379 if (!util_blitter_is_blit_supported(ctx
->blitter
, &info
)) {
380 debug_printf("swr: blit unsupported %s -> %s\n",
381 util_format_short_name(info
.src
.resource
->format
),
382 util_format_short_name(info
.dst
.resource
->format
));
386 if (ctx
->active_queries
) {
387 SwrEnableStatsFE(ctx
->swrContext
, FALSE
);
388 SwrEnableStatsBE(ctx
->swrContext
, FALSE
);
391 util_blitter_save_vertex_buffer_slot(ctx
->blitter
, ctx
->vertex_buffer
);
392 util_blitter_save_vertex_elements(ctx
->blitter
, (void *)ctx
->velems
);
393 util_blitter_save_vertex_shader(ctx
->blitter
, (void *)ctx
->vs
);
394 util_blitter_save_geometry_shader(ctx
->blitter
, (void*)ctx
->gs
);
395 util_blitter_save_so_targets(
398 (struct pipe_stream_output_target
**)ctx
->so_targets
);
399 util_blitter_save_rasterizer(ctx
->blitter
, (void *)ctx
->rasterizer
);
400 util_blitter_save_viewport(ctx
->blitter
, &ctx
->viewport
);
401 util_blitter_save_scissor(ctx
->blitter
, &ctx
->scissor
);
402 util_blitter_save_fragment_shader(ctx
->blitter
, ctx
->fs
);
403 util_blitter_save_blend(ctx
->blitter
, (void *)ctx
->blend
);
404 util_blitter_save_depth_stencil_alpha(ctx
->blitter
,
405 (void *)ctx
->depth_stencil
);
406 util_blitter_save_stencil_ref(ctx
->blitter
, &ctx
->stencil_ref
);
407 util_blitter_save_sample_mask(ctx
->blitter
, ctx
->sample_mask
);
408 util_blitter_save_framebuffer(ctx
->blitter
, &ctx
->framebuffer
);
409 util_blitter_save_fragment_sampler_states(
411 ctx
->num_samplers
[PIPE_SHADER_FRAGMENT
],
412 (void **)ctx
->samplers
[PIPE_SHADER_FRAGMENT
]);
413 util_blitter_save_fragment_sampler_views(
415 ctx
->num_sampler_views
[PIPE_SHADER_FRAGMENT
],
416 ctx
->sampler_views
[PIPE_SHADER_FRAGMENT
]);
417 util_blitter_save_render_condition(ctx
->blitter
,
418 ctx
->render_cond_query
,
419 ctx
->render_cond_cond
,
420 ctx
->render_cond_mode
);
422 util_blitter_blit(ctx
->blitter
, &info
);
424 if (ctx
->active_queries
) {
425 SwrEnableStatsFE(ctx
->swrContext
, TRUE
);
426 SwrEnableStatsBE(ctx
->swrContext
, TRUE
);
432 swr_destroy(struct pipe_context
*pipe
)
434 struct swr_context
*ctx
= swr_context(pipe
);
435 struct swr_screen
*screen
= swr_screen(pipe
->screen
);
438 util_blitter_destroy(ctx
->blitter
);
440 for (unsigned i
= 0; i
< PIPE_MAX_COLOR_BUFS
; i
++) {
441 pipe_surface_reference(&ctx
->framebuffer
.cbufs
[i
], NULL
);
444 pipe_surface_reference(&ctx
->framebuffer
.zsbuf
, NULL
);
446 for (unsigned i
= 0; i
< ARRAY_SIZE(ctx
->sampler_views
[0]); i
++) {
447 pipe_sampler_view_reference(&ctx
->sampler_views
[PIPE_SHADER_FRAGMENT
][i
], NULL
);
450 for (unsigned i
= 0; i
< ARRAY_SIZE(ctx
->sampler_views
[0]); i
++) {
451 pipe_sampler_view_reference(&ctx
->sampler_views
[PIPE_SHADER_VERTEX
][i
], NULL
);
454 if (ctx
->pipe
.stream_uploader
)
455 u_upload_destroy(ctx
->pipe
.stream_uploader
);
457 /* Idle core after destroying buffer resources, but before deleting
458 * context. Destroying resources has potentially called StoreTiles.*/
459 SwrWaitForIdle(ctx
->swrContext
);
462 SwrDestroyContext(ctx
->swrContext
);
464 delete ctx
->blendJIT
;
466 swr_destroy_scratch_buffers(ctx
);
468 /* Only update screen->pipe if current context is being destroyed */
470 if (screen
->pipe
== pipe
)
478 swr_render_condition(struct pipe_context
*pipe
,
479 struct pipe_query
*query
,
481 enum pipe_render_cond_flag mode
)
483 struct swr_context
*ctx
= swr_context(pipe
);
485 ctx
->render_cond_query
= query
;
486 ctx
->render_cond_mode
= mode
;
487 ctx
->render_cond_cond
= condition
;
491 swr_UpdateStats(HANDLE hPrivateContext
, const SWR_STATS
*pStats
)
493 swr_draw_context
*pDC
= (swr_draw_context
*)hPrivateContext
;
498 struct swr_query_result
*pqr
= (struct swr_query_result
*)pDC
->pStats
;
500 SWR_STATS
*pSwrStats
= &pqr
->core
;
502 pSwrStats
->DepthPassCount
+= pStats
->DepthPassCount
;
503 pSwrStats
->PsInvocations
+= pStats
->PsInvocations
;
504 pSwrStats
->CsInvocations
+= pStats
->CsInvocations
;
508 swr_UpdateStatsFE(HANDLE hPrivateContext
, const SWR_STATS_FE
*pStats
)
510 swr_draw_context
*pDC
= (swr_draw_context
*)hPrivateContext
;
515 struct swr_query_result
*pqr
= (struct swr_query_result
*)pDC
->pStats
;
517 SWR_STATS_FE
*pSwrStats
= &pqr
->coreFE
;
518 p_atomic_add(&pSwrStats
->IaVertices
, pStats
->IaVertices
);
519 p_atomic_add(&pSwrStats
->IaPrimitives
, pStats
->IaPrimitives
);
520 p_atomic_add(&pSwrStats
->VsInvocations
, pStats
->VsInvocations
);
521 p_atomic_add(&pSwrStats
->HsInvocations
, pStats
->HsInvocations
);
522 p_atomic_add(&pSwrStats
->DsInvocations
, pStats
->DsInvocations
);
523 p_atomic_add(&pSwrStats
->GsInvocations
, pStats
->GsInvocations
);
524 p_atomic_add(&pSwrStats
->CInvocations
, pStats
->CInvocations
);
525 p_atomic_add(&pSwrStats
->CPrimitives
, pStats
->CPrimitives
);
526 p_atomic_add(&pSwrStats
->GsPrimitives
, pStats
->GsPrimitives
);
528 for (unsigned i
= 0; i
< 4; i
++) {
529 p_atomic_add(&pSwrStats
->SoPrimStorageNeeded
[i
],
530 pStats
->SoPrimStorageNeeded
[i
]);
531 p_atomic_add(&pSwrStats
->SoNumPrimsWritten
[i
],
532 pStats
->SoNumPrimsWritten
[i
]);
536 struct pipe_context
*
537 swr_create_context(struct pipe_screen
*p_screen
, void *priv
, unsigned flags
)
539 struct swr_context
*ctx
= (struct swr_context
*)
540 AlignedMalloc(sizeof(struct swr_context
), KNOB_SIMD_BYTES
);
541 memset(ctx
, 0, sizeof(struct swr_context
));
544 new std::unordered_map
<BLEND_COMPILE_STATE
, PFN_BLEND_JIT_FUNC
>;
546 SWR_CREATECONTEXT_INFO createInfo
;
547 memset(&createInfo
, 0, sizeof(createInfo
));
548 createInfo
.privateStateSize
= sizeof(swr_draw_context
);
549 createInfo
.pfnLoadTile
= swr_LoadHotTile
;
550 createInfo
.pfnStoreTile
= swr_StoreHotTile
;
551 createInfo
.pfnClearTile
= swr_StoreHotTileClear
;
552 createInfo
.pfnUpdateStats
= swr_UpdateStats
;
553 createInfo
.pfnUpdateStatsFE
= swr_UpdateStatsFE
;
554 ctx
->swrContext
= SwrCreateContext(&createInfo
);
558 if (ctx
->swrContext
== NULL
)
561 ctx
->pipe
.screen
= p_screen
;
562 ctx
->pipe
.destroy
= swr_destroy
;
563 ctx
->pipe
.priv
= priv
;
564 ctx
->pipe
.create_surface
= swr_create_surface
;
565 ctx
->pipe
.surface_destroy
= swr_surface_destroy
;
566 ctx
->pipe
.transfer_map
= swr_transfer_map
;
567 ctx
->pipe
.transfer_unmap
= swr_transfer_unmap
;
568 ctx
->pipe
.transfer_flush_region
= swr_transfer_flush_region
;
570 ctx
->pipe
.buffer_subdata
= u_default_buffer_subdata
;
571 ctx
->pipe
.texture_subdata
= u_default_texture_subdata
;
573 ctx
->pipe
.clear_texture
= util_clear_texture
;
574 ctx
->pipe
.resource_copy_region
= swr_resource_copy
;
575 ctx
->pipe
.render_condition
= swr_render_condition
;
577 swr_state_init(&ctx
->pipe
);
578 swr_clear_init(&ctx
->pipe
);
579 swr_draw_init(&ctx
->pipe
);
580 swr_query_init(&ctx
->pipe
);
582 ctx
->pipe
.stream_uploader
= u_upload_create_default(&ctx
->pipe
);
583 if (!ctx
->pipe
.stream_uploader
)
585 ctx
->pipe
.const_uploader
= ctx
->pipe
.stream_uploader
;
587 ctx
->pipe
.blit
= swr_blit
;
588 ctx
->blitter
= util_blitter_create(&ctx
->pipe
);
592 swr_init_scratch_buffers(ctx
);
597 /* Should really validate the init steps and fail gracefully */
598 swr_destroy(&ctx
->pipe
);