/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 * Common helper functions for PBO up- and downloads.
 */
31 #include "state_tracker/st_context.h"
32 #include "state_tracker/st_nir.h"
33 #include "state_tracker/st_pbo.h"
34 #include "state_tracker/st_cb_bufferobjects.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_defines.h"
38 #include "pipe/p_screen.h"
39 #include "cso_cache/cso_context.h"
40 #include "tgsi/tgsi_ureg.h"
41 #include "util/u_format.h"
42 #include "util/u_inlines.h"
43 #include "util/u_upload_mgr.h"
45 #include "compiler/nir/nir_builder.h"
/* Integer clamping/conversion that the PBO fragment shader must apply
 * when the source and destination formats disagree in signedness.
 */
enum st_pbo_conversion {
   ST_PBO_CONVERT_NONE = 0,
   ST_PBO_CONVERT_UINT_TO_SINT,
   ST_PBO_CONVERT_SINT_TO_UINT,

   ST_NUM_PBO_CONVERSIONS
};
56 /* Final setup of buffer addressing information.
58 * buf_offset is in pixels.
60 * Returns false if something (e.g. alignment) prevents PBO upload/download.
63 st_pbo_addresses_setup(struct st_context
*st
,
64 struct pipe_resource
*buf
, intptr_t buf_offset
,
65 struct st_pbo_addresses
*addr
)
69 /* Check alignment against texture buffer requirements. */
71 unsigned ofs
= (buf_offset
* addr
->bytes_per_pixel
) % st
->ctx
->Const
.TextureBufferOffsetAlignment
;
73 if (ofs
% addr
->bytes_per_pixel
!= 0)
76 skip_pixels
= ofs
/ addr
->bytes_per_pixel
;
77 buf_offset
-= skip_pixels
;
83 assert(buf_offset
>= 0);
86 addr
->first_element
= buf_offset
;
87 addr
->last_element
= buf_offset
+ skip_pixels
+ addr
->width
- 1
88 + (addr
->height
- 1 + (addr
->depth
- 1) * addr
->image_height
) * addr
->pixels_per_row
;
90 if (addr
->last_element
- addr
->first_element
> st
->ctx
->Const
.MaxTextureBufferSize
- 1)
93 /* This should be ensured by Mesa before calling our callbacks */
94 assert((addr
->last_element
+ 1) * addr
->bytes_per_pixel
<= buf
->width0
);
96 addr
->constants
.xoffset
= -addr
->xoffset
+ skip_pixels
;
97 addr
->constants
.yoffset
= -addr
->yoffset
;
98 addr
->constants
.stride
= addr
->pixels_per_row
;
99 addr
->constants
.image_size
= addr
->pixels_per_row
* addr
->image_height
;
100 addr
->constants
.layer_offset
= 0;
105 /* Validate and fill buffer addressing information based on GL pixelstore
108 * Returns false if some aspect of the addressing (e.g. alignment) prevents
109 * PBO upload/download.
112 st_pbo_addresses_pixelstore(struct st_context
*st
,
113 GLenum gl_target
, bool skip_images
,
114 const struct gl_pixelstore_attrib
*store
,
116 struct st_pbo_addresses
*addr
)
118 struct pipe_resource
*buf
= st_buffer_object(store
->BufferObj
)->buffer
;
119 intptr_t buf_offset
= (intptr_t) pixels
;
121 if (buf_offset
% addr
->bytes_per_pixel
)
124 /* Convert to texels */
125 buf_offset
= buf_offset
/ addr
->bytes_per_pixel
;
127 /* Determine image height */
128 if (gl_target
== GL_TEXTURE_1D_ARRAY
) {
129 addr
->image_height
= 1;
131 addr
->image_height
= store
->ImageHeight
> 0 ? store
->ImageHeight
: addr
->height
;
134 /* Compute the stride, taking store->Alignment into account */
136 unsigned pixels_per_row
= store
->RowLength
> 0 ?
137 store
->RowLength
: addr
->width
;
138 unsigned bytes_per_row
= pixels_per_row
* addr
->bytes_per_pixel
;
139 unsigned remainder
= bytes_per_row
% store
->Alignment
;
140 unsigned offset_rows
;
143 bytes_per_row
+= store
->Alignment
- remainder
;
145 if (bytes_per_row
% addr
->bytes_per_pixel
)
148 addr
->pixels_per_row
= bytes_per_row
/ addr
->bytes_per_pixel
;
150 offset_rows
= store
->SkipRows
;
152 offset_rows
+= addr
->image_height
* store
->SkipImages
;
154 buf_offset
+= store
->SkipPixels
+ addr
->pixels_per_row
* offset_rows
;
157 if (!st_pbo_addresses_setup(st
, buf
, buf_offset
, addr
))
160 /* Support GL_PACK_INVERT_MESA */
162 addr
->constants
.xoffset
+= (addr
->height
- 1) * addr
->constants
.stride
;
163 addr
->constants
.stride
= -addr
->constants
.stride
;
169 /* For download from a framebuffer, we may have to invert the Y axis. The
170 * setup is as follows:
171 * - set viewport to inverted, so that the position sysval is correct for
173 * - this function adjusts the fragment shader's constant buffer to compute
174 * the correct destination addresses.
177 st_pbo_addresses_invert_y(struct st_pbo_addresses
*addr
,
178 unsigned viewport_height
)
180 addr
->constants
.xoffset
+=
181 (viewport_height
- 1 + 2 * addr
->constants
.yoffset
) * addr
->constants
.stride
;
182 addr
->constants
.stride
= -addr
->constants
.stride
;
185 /* Setup all vertex pipeline state, rasterizer state, and fragment shader
186 * constants, and issue the draw call for PBO upload/download.
188 * The caller is responsible for saving and restoring state, as well as for
189 * setting other fragment shader state (fragment shader, samplers), and
190 * framebuffer/viewport/DSA/blend state.
193 st_pbo_draw(struct st_context
*st
, const struct st_pbo_addresses
*addr
,
194 unsigned surface_width
, unsigned surface_height
)
196 struct cso_context
*cso
= st
->cso_context
;
198 /* Setup vertex and geometry shaders */
200 st
->pbo
.vs
= st_pbo_create_vs(st
);
205 if (addr
->depth
!= 1 && st
->pbo
.use_gs
&& !st
->pbo
.gs
) {
206 st
->pbo
.gs
= st_pbo_create_gs(st
);
211 cso_set_vertex_shader_handle(cso
, st
->pbo
.vs
);
213 cso_set_geometry_shader_handle(cso
, addr
->depth
!= 1 ? st
->pbo
.gs
: NULL
);
215 cso_set_tessctrl_shader_handle(cso
, NULL
);
217 cso_set_tesseval_shader_handle(cso
, NULL
);
219 /* Upload vertices */
221 struct pipe_vertex_buffer vbo
= {0};
222 struct pipe_vertex_element velem
;
224 float x0
= (float) addr
->xoffset
/ surface_width
* 2.0f
- 1.0f
;
225 float y0
= (float) addr
->yoffset
/ surface_height
* 2.0f
- 1.0f
;
226 float x1
= (float) (addr
->xoffset
+ addr
->width
) / surface_width
* 2.0f
- 1.0f
;
227 float y1
= (float) (addr
->yoffset
+ addr
->height
) / surface_height
* 2.0f
- 1.0f
;
231 vbo
.stride
= 2 * sizeof(float);
233 u_upload_alloc(st
->pipe
->stream_uploader
, 0, 8 * sizeof(float), 4,
234 &vbo
.buffer_offset
, &vbo
.buffer
.resource
, (void **) &verts
);
247 u_upload_unmap(st
->pipe
->stream_uploader
);
249 velem
.src_offset
= 0;
250 velem
.instance_divisor
= 0;
251 velem
.vertex_buffer_index
= 0;
252 velem
.src_format
= PIPE_FORMAT_R32G32_FLOAT
;
254 cso_set_vertex_elements(cso
, 1, &velem
);
256 cso_set_vertex_buffers(cso
, velem
.vertex_buffer_index
, 1, &vbo
);
258 pipe_resource_reference(&vbo
.buffer
.resource
, NULL
);
261 /* Upload constants */
263 struct pipe_constant_buffer cb
;
266 cb
.user_buffer
= &addr
->constants
;
267 cb
.buffer_offset
= 0;
268 cb
.buffer_size
= sizeof(addr
->constants
);
270 cso_set_constant_buffer(cso
, PIPE_SHADER_FRAGMENT
, 0, &cb
);
272 pipe_resource_reference(&cb
.buffer
, NULL
);
275 /* Rasterizer state */
276 cso_set_rasterizer(cso
, &st
->pbo
.raster
);
278 /* Disable stream output */
279 cso_set_stream_outputs(cso
, 0, NULL
, 0);
281 if (addr
->depth
== 1) {
282 cso_draw_arrays(cso
, PIPE_PRIM_TRIANGLE_STRIP
, 0, 4);
284 cso_draw_arrays_instanced(cso
, PIPE_PRIM_TRIANGLE_STRIP
,
285 0, 4, 0, addr
->depth
);
292 st_pbo_create_vs(struct st_context
*st
)
294 struct pipe_screen
*pscreen
= st
->pipe
->screen
;
295 bool use_nir
= PIPE_SHADER_IR_NIR
==
296 pscreen
->get_shader_param(pscreen
, PIPE_SHADER_VERTEX
,
297 PIPE_SHADER_CAP_PREFERRED_IR
);
300 unsigned inputs
[] = { VERT_ATTRIB_POS
, SYSTEM_VALUE_INSTANCE_ID
, };
301 unsigned outputs
[] = { VARYING_SLOT_POS
, VARYING_SLOT_LAYER
};
303 return st_nir_make_passthrough_shader(st
, "st/pbo VS",
305 st
->pbo
.layers
? 2 : 1,
306 inputs
, outputs
, NULL
, (1 << 1));
309 struct ureg_program
*ureg
;
310 struct ureg_src in_pos
;
311 struct ureg_src in_instanceid
;
312 struct ureg_dst out_pos
;
313 struct ureg_dst out_layer
;
315 ureg
= ureg_create(PIPE_SHADER_VERTEX
);
319 in_pos
= ureg_DECL_vs_input(ureg
, TGSI_SEMANTIC_POSITION
);
321 out_pos
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_POSITION
, 0);
323 if (st
->pbo
.layers
) {
324 in_instanceid
= ureg_DECL_system_value(ureg
, TGSI_SEMANTIC_INSTANCEID
, 0);
327 out_layer
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_LAYER
, 0);
330 /* out_pos = in_pos */
331 ureg_MOV(ureg
, out_pos
, in_pos
);
333 if (st
->pbo
.layers
) {
334 if (st
->pbo
.use_gs
) {
335 /* out_pos.z = i2f(gl_InstanceID) */
336 ureg_I2F(ureg
, ureg_writemask(out_pos
, TGSI_WRITEMASK_Z
),
337 ureg_scalar(in_instanceid
, TGSI_SWIZZLE_X
));
339 /* out_layer = gl_InstanceID */
340 ureg_MOV(ureg
, ureg_writemask(out_layer
, TGSI_WRITEMASK_X
),
341 ureg_scalar(in_instanceid
, TGSI_SWIZZLE_X
));
347 return ureg_create_shader_and_destroy(ureg
, st
->pipe
);
351 st_pbo_create_gs(struct st_context
*st
)
353 static const int zero
= 0;
354 struct ureg_program
*ureg
;
355 struct ureg_dst out_pos
;
356 struct ureg_dst out_layer
;
357 struct ureg_src in_pos
;
361 ureg
= ureg_create(PIPE_SHADER_GEOMETRY
);
365 ureg_property(ureg
, TGSI_PROPERTY_GS_INPUT_PRIM
, PIPE_PRIM_TRIANGLES
);
366 ureg_property(ureg
, TGSI_PROPERTY_GS_OUTPUT_PRIM
, PIPE_PRIM_TRIANGLE_STRIP
);
367 ureg_property(ureg
, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES
, 3);
369 out_pos
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_POSITION
, 0);
370 out_layer
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_LAYER
, 0);
372 in_pos
= ureg_DECL_input(ureg
, TGSI_SEMANTIC_POSITION
, 0, 0, 1);
374 imm
= ureg_DECL_immediate_int(ureg
, &zero
, 1);
376 for (i
= 0; i
< 3; ++i
) {
377 struct ureg_src in_pos_vertex
= ureg_src_dimension(in_pos
, i
);
379 /* out_pos = in_pos[i] */
380 ureg_MOV(ureg
, out_pos
, in_pos_vertex
);
382 /* out_layer.x = f2i(in_pos[i].z) */
383 ureg_F2I(ureg
, ureg_writemask(out_layer
, TGSI_WRITEMASK_X
),
384 ureg_scalar(in_pos_vertex
, TGSI_SWIZZLE_Z
));
386 ureg_EMIT(ureg
, ureg_scalar(imm
, TGSI_SWIZZLE_X
));
391 return ureg_create_shader_and_destroy(ureg
, st
->pipe
);
395 build_conversion(struct ureg_program
*ureg
, const struct ureg_dst
*temp
,
396 enum st_pbo_conversion conversion
)
398 switch (conversion
) {
399 case ST_PBO_CONVERT_SINT_TO_UINT
:
400 ureg_IMAX(ureg
, *temp
, ureg_src(*temp
), ureg_imm1i(ureg
, 0));
402 case ST_PBO_CONVERT_UINT_TO_SINT
:
403 ureg_UMIN(ureg
, *temp
, ureg_src(*temp
), ureg_imm1u(ureg
, (1u << 31) - 1));
411 static const struct glsl_type
*
412 sampler_type_for_target(enum pipe_texture_target target
)
414 bool is_array
= target
>= PIPE_TEXTURE_1D_ARRAY
;
415 static const enum glsl_sampler_dim dim
[] = {
416 [PIPE_BUFFER
] = GLSL_SAMPLER_DIM_BUF
,
417 [PIPE_TEXTURE_1D
] = GLSL_SAMPLER_DIM_1D
,
418 [PIPE_TEXTURE_2D
] = GLSL_SAMPLER_DIM_2D
,
419 [PIPE_TEXTURE_3D
] = GLSL_SAMPLER_DIM_3D
,
420 [PIPE_TEXTURE_CUBE
] = GLSL_SAMPLER_DIM_CUBE
,
421 [PIPE_TEXTURE_RECT
] = GLSL_SAMPLER_DIM_RECT
,
422 [PIPE_TEXTURE_1D_ARRAY
] = GLSL_SAMPLER_DIM_1D
,
423 [PIPE_TEXTURE_2D_ARRAY
] = GLSL_SAMPLER_DIM_2D
,
424 [PIPE_TEXTURE_CUBE_ARRAY
] = GLSL_SAMPLER_DIM_CUBE
,
427 return glsl_sampler_type(dim
[target
], false, is_array
, GLSL_TYPE_FLOAT
);
431 create_fs_nir(struct st_context
*st
,
433 enum pipe_texture_target target
,
434 enum st_pbo_conversion conversion
)
436 struct pipe_screen
*screen
= st
->pipe
->screen
;
437 struct nir_builder b
;
438 const nir_shader_compiler_options
*options
=
439 st
->ctx
->Const
.ShaderCompilerOptions
[MESA_SHADER_FRAGMENT
].NirOptions
;
441 screen
->get_param(screen
, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL
);
443 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, options
);
445 nir_ssa_def
*zero
= nir_imm_int(&b
, 0);
447 /* param = [ -xoffset + skip_pixels, -yoffset, stride, image_height ] */
448 nir_variable
*param_var
=
449 nir_variable_create(b
.shader
, nir_var_uniform
, glsl_vec4_type(), "param");
450 nir_ssa_def
*param
= nir_load_var(&b
, param_var
);
452 nir_variable
*fragcoord
=
453 nir_variable_create(b
.shader
, pos_is_sysval
? nir_var_system_value
:
454 nir_var_shader_in
, glsl_vec4_type(), "gl_FragCoord");
455 fragcoord
->data
.location
= pos_is_sysval
? SYSTEM_VALUE_FRAG_COORD
457 nir_ssa_def
*coord
= nir_load_var(&b
, fragcoord
);
459 nir_ssa_def
*layer
= NULL
;
460 if (st
->pbo
.layers
&& (!download
|| target
== PIPE_TEXTURE_1D_ARRAY
||
461 target
== PIPE_TEXTURE_2D_ARRAY
||
462 target
== PIPE_TEXTURE_3D
||
463 target
== PIPE_TEXTURE_CUBE
||
464 target
== PIPE_TEXTURE_CUBE_ARRAY
)) {
465 nir_variable
*var
= nir_variable_create(b
.shader
, nir_var_shader_in
,
466 glsl_int_type(), "gl_Layer");
467 var
->data
.location
= VARYING_SLOT_LAYER
;
468 var
->data
.interpolation
= INTERP_MODE_FLAT
;
469 layer
= nir_load_var(&b
, var
);
472 /* offset_pos = param.xy + f2i(coord.xy) */
473 nir_ssa_def
*offset_pos
=
474 nir_iadd(&b
, nir_channels(&b
, param
, TGSI_WRITEMASK_XY
),
475 nir_f2i32(&b
, nir_channels(&b
, coord
, TGSI_WRITEMASK_XY
)));
477 /* addr = offset_pos.x + offset_pos.y * stride */
478 nir_ssa_def
*pbo_addr
=
479 nir_iadd(&b
, nir_channel(&b
, offset_pos
, 0),
480 nir_imul(&b
, nir_channel(&b
, offset_pos
, 1),
481 nir_channel(&b
, param
, 2)));
483 /* pbo_addr += image_height * layer */
484 pbo_addr
= nir_iadd(&b
, pbo_addr
,
485 nir_imul(&b
, layer
, nir_channel(&b
, param
, 3)));
488 nir_ssa_def
*texcoord
;
490 texcoord
= nir_f2i32(&b
, nir_channels(&b
, coord
, TGSI_WRITEMASK_XY
));
493 nir_ssa_def
*src_layer
= layer
;
495 if (target
== PIPE_TEXTURE_3D
) {
496 nir_variable
*layer_offset_var
=
497 nir_variable_create(b
.shader
, nir_var_uniform
,
498 glsl_int_type(), "layer_offset");
499 layer_offset_var
->data
.driver_location
= 4;
500 nir_ssa_def
*layer_offset
= nir_load_var(&b
, layer_offset_var
);
502 src_layer
= nir_iadd(&b
, layer
, layer_offset
);
505 texcoord
= nir_vec3(&b
, nir_channel(&b
, texcoord
, 0),
506 nir_channel(&b
, texcoord
, 1),
513 nir_variable
*tex_var
=
514 nir_variable_create(b
.shader
, nir_var_uniform
,
515 sampler_type_for_target(target
), "tex");
516 tex_var
->data
.explicit_binding
= true;
517 tex_var
->data
.binding
= 0;
519 nir_deref_instr
*tex_deref
= nir_build_deref_var(&b
, tex_var
);
521 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 3);
522 tex
->op
= nir_texop_txf
;
523 tex
->sampler_dim
= glsl_get_sampler_dim(tex_var
->type
);
524 tex
->coord_components
=
525 glsl_get_sampler_coordinate_components(tex_var
->type
);
526 tex
->dest_type
= nir_type_float
;
527 tex
->src
[0].src_type
= nir_tex_src_texture_deref
;
528 tex
->src
[0].src
= nir_src_for_ssa(&tex_deref
->dest
.ssa
);
529 tex
->src
[1].src_type
= nir_tex_src_sampler_deref
;
530 tex
->src
[1].src
= nir_src_for_ssa(&tex_deref
->dest
.ssa
);
531 tex
->src
[2].src_type
= nir_tex_src_coord
;
532 tex
->src
[2].src
= nir_src_for_ssa(texcoord
);
533 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, NULL
);
534 nir_builder_instr_insert(&b
, &tex
->instr
);
535 nir_ssa_def
*result
= &tex
->dest
.ssa
;
537 if (conversion
== ST_PBO_CONVERT_SINT_TO_UINT
)
538 result
= nir_imax(&b
, result
, zero
);
539 else if (conversion
== ST_PBO_CONVERT_UINT_TO_SINT
)
540 result
= nir_umin(&b
, result
, nir_imm_int(&b
, (1u << 31) - 1));
543 nir_variable
*img_var
=
544 nir_variable_create(b
.shader
, nir_var_uniform
,
545 glsl_image_type(GLSL_SAMPLER_DIM_BUF
, false,
546 GLSL_TYPE_FLOAT
), "img");
547 img_var
->data
.image
.access
= ACCESS_NON_READABLE
;
548 img_var
->data
.explicit_binding
= true;
549 img_var
->data
.binding
= 0;
550 nir_deref_instr
*img_deref
= nir_build_deref_var(&b
, img_var
);
551 nir_intrinsic_instr
*intrin
=
552 nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_image_deref_store
);
553 intrin
->src
[0] = nir_src_for_ssa(&img_deref
->dest
.ssa
);
555 nir_src_for_ssa(nir_vec4(&b
, pbo_addr
, zero
, zero
, zero
));
556 intrin
->src
[2] = nir_src_for_ssa(zero
);
557 intrin
->src
[3] = nir_src_for_ssa(result
);
558 intrin
->num_components
= 4;
559 nir_builder_instr_insert(&b
, &intrin
->instr
);
561 nir_variable
*color
=
562 nir_variable_create(b
.shader
, nir_var_shader_out
, glsl_vec4_type(),
564 color
->data
.location
= FRAG_RESULT_COLOR
;
566 nir_store_var(&b
, color
, result
, TGSI_WRITEMASK_XYZW
);
569 return st_nir_finish_builtin_shader(st
, b
.shader
, download
?
570 "st/pbo download FS" :
575 create_fs_tgsi(struct st_context
*st
, bool download
,
576 enum pipe_texture_target target
,
577 enum st_pbo_conversion conversion
)
579 struct pipe_context
*pipe
= st
->pipe
;
580 struct pipe_screen
*screen
= pipe
->screen
;
581 struct ureg_program
*ureg
;
584 struct ureg_src sampler
;
586 struct ureg_src layer
;
587 struct ureg_src const0
;
588 struct ureg_src const1
;
589 struct ureg_dst temp0
;
593 (!download
|| target
== PIPE_TEXTURE_1D_ARRAY
594 || target
== PIPE_TEXTURE_2D_ARRAY
595 || target
== PIPE_TEXTURE_3D
596 || target
== PIPE_TEXTURE_CUBE
597 || target
== PIPE_TEXTURE_CUBE_ARRAY
);
599 ureg
= ureg_create(PIPE_SHADER_FRAGMENT
);
604 out
= ureg_DECL_output(ureg
, TGSI_SEMANTIC_COLOR
, 0);
606 struct ureg_src image
;
608 /* writeonly images do not require an explicitly given format. */
609 image
= ureg_DECL_image(ureg
, 0, TGSI_TEXTURE_BUFFER
, PIPE_FORMAT_NONE
,
611 out
= ureg_dst(image
);
614 sampler
= ureg_DECL_sampler(ureg
, 0);
615 if (screen
->get_param(screen
, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL
)) {
616 pos
= ureg_DECL_system_value(ureg
, TGSI_SEMANTIC_POSITION
, 0);
618 pos
= ureg_DECL_fs_input(ureg
, TGSI_SEMANTIC_POSITION
, 0,
619 TGSI_INTERPOLATE_LINEAR
);
622 layer
= ureg_DECL_fs_input(ureg
, TGSI_SEMANTIC_LAYER
, 0,
623 TGSI_INTERPOLATE_CONSTANT
);
625 const0
= ureg_DECL_constant(ureg
, 0);
626 const1
= ureg_DECL_constant(ureg
, 1);
627 temp0
= ureg_DECL_temporary(ureg
);
629 /* Note: const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_height ] */
631 /* temp0.xy = f2i(temp0.xy) */
632 ureg_F2I(ureg
, ureg_writemask(temp0
, TGSI_WRITEMASK_XY
),
634 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
,
635 TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Y
));
637 /* temp0.xy = temp0.xy + const0.xy */
638 ureg_UADD(ureg
, ureg_writemask(temp0
, TGSI_WRITEMASK_XY
),
639 ureg_swizzle(ureg_src(temp0
),
640 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
,
641 TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Y
),
643 TGSI_SWIZZLE_X
, TGSI_SWIZZLE_Y
,
644 TGSI_SWIZZLE_Y
, TGSI_SWIZZLE_Y
));
646 /* temp0.x = const0.z * temp0.y + temp0.x */
647 ureg_UMAD(ureg
, ureg_writemask(temp0
, TGSI_WRITEMASK_X
),
648 ureg_scalar(const0
, TGSI_SWIZZLE_Z
),
649 ureg_scalar(ureg_src(temp0
), TGSI_SWIZZLE_Y
),
650 ureg_scalar(ureg_src(temp0
), TGSI_SWIZZLE_X
));
653 /* temp0.x = const0.w * layer + temp0.x */
654 ureg_UMAD(ureg
, ureg_writemask(temp0
, TGSI_WRITEMASK_X
),
655 ureg_scalar(const0
, TGSI_SWIZZLE_W
),
656 ureg_scalar(layer
, TGSI_SWIZZLE_X
),
657 ureg_scalar(ureg_src(temp0
), TGSI_SWIZZLE_X
));
661 ureg_MOV(ureg
, ureg_writemask(temp0
, TGSI_WRITEMASK_W
), ureg_imm1u(ureg
, 0));
664 struct ureg_dst temp1
;
665 struct ureg_src op
[2];
667 temp1
= ureg_DECL_temporary(ureg
);
669 /* temp1.xy = pos.xy */
670 ureg_F2I(ureg
, ureg_writemask(temp1
, TGSI_WRITEMASK_XY
), pos
);
673 ureg_MOV(ureg
, ureg_writemask(temp1
, TGSI_WRITEMASK_ZW
), ureg_imm1u(ureg
, 0));
676 struct ureg_dst temp1_layer
=
677 ureg_writemask(temp1
, target
== PIPE_TEXTURE_1D_ARRAY
? TGSI_WRITEMASK_Y
680 /* temp1.y/z = layer */
681 ureg_MOV(ureg
, temp1_layer
, ureg_scalar(layer
, TGSI_SWIZZLE_X
));
683 if (target
== PIPE_TEXTURE_3D
) {
684 /* temp1.z += layer_offset */
685 ureg_UADD(ureg
, temp1_layer
,
686 ureg_scalar(ureg_src(temp1
), TGSI_SWIZZLE_Z
),
687 ureg_scalar(const1
, TGSI_SWIZZLE_X
));
691 /* temp1 = txf(sampler, temp1) */
692 ureg_TXF(ureg
, temp1
, util_pipe_tex_to_tgsi_tex(target
, 1),
693 ureg_src(temp1
), sampler
);
695 build_conversion(ureg
, &temp1
, conversion
);
697 /* store(out, temp0, temp1) */
698 op
[0] = ureg_src(temp0
);
699 op
[1] = ureg_src(temp1
);
700 ureg_memory_insn(ureg
, TGSI_OPCODE_STORE
, &out
, 1, op
, 2, 0,
701 TGSI_TEXTURE_BUFFER
, PIPE_FORMAT_NONE
);
703 ureg_release_temporary(ureg
, temp1
);
705 /* out = txf(sampler, temp0.x) */
706 ureg_TXF(ureg
, temp0
, TGSI_TEXTURE_BUFFER
, ureg_src(temp0
), sampler
);
708 build_conversion(ureg
, &temp0
, conversion
);
710 ureg_MOV(ureg
, out
, ureg_src(temp0
));
713 ureg_release_temporary(ureg
, temp0
);
717 return ureg_create_shader_and_destroy(ureg
, pipe
);
721 create_fs(struct st_context
*st
, bool download
,
722 enum pipe_texture_target target
,
723 enum st_pbo_conversion conversion
)
725 struct pipe_screen
*pscreen
= st
->pipe
->screen
;
726 bool use_nir
= PIPE_SHADER_IR_NIR
==
727 pscreen
->get_shader_param(pscreen
, PIPE_SHADER_VERTEX
,
728 PIPE_SHADER_CAP_PREFERRED_IR
);
731 return create_fs_nir(st
, download
, target
, conversion
);
733 return create_fs_tgsi(st
, download
, target
, conversion
);
736 static enum st_pbo_conversion
737 get_pbo_conversion(enum pipe_format src_format
, enum pipe_format dst_format
)
739 if (util_format_is_pure_uint(src_format
)) {
740 if (util_format_is_pure_sint(dst_format
))
741 return ST_PBO_CONVERT_UINT_TO_SINT
;
742 } else if (util_format_is_pure_sint(src_format
)) {
743 if (util_format_is_pure_uint(dst_format
))
744 return ST_PBO_CONVERT_SINT_TO_UINT
;
747 return ST_PBO_CONVERT_NONE
;
751 st_pbo_get_upload_fs(struct st_context
*st
,
752 enum pipe_format src_format
,
753 enum pipe_format dst_format
)
755 STATIC_ASSERT(ARRAY_SIZE(st
->pbo
.upload_fs
) == ST_NUM_PBO_CONVERSIONS
);
757 enum st_pbo_conversion conversion
= get_pbo_conversion(src_format
, dst_format
);
759 if (!st
->pbo
.upload_fs
[conversion
])
760 st
->pbo
.upload_fs
[conversion
] = create_fs(st
, false, 0, conversion
);
762 return st
->pbo
.upload_fs
[conversion
];
766 st_pbo_get_download_fs(struct st_context
*st
, enum pipe_texture_target target
,
767 enum pipe_format src_format
,
768 enum pipe_format dst_format
)
770 STATIC_ASSERT(ARRAY_SIZE(st
->pbo
.download_fs
) == ST_NUM_PBO_CONVERSIONS
);
771 assert(target
< PIPE_MAX_TEXTURE_TYPES
);
773 enum st_pbo_conversion conversion
= get_pbo_conversion(src_format
, dst_format
);
775 if (!st
->pbo
.download_fs
[conversion
][target
])
776 st
->pbo
.download_fs
[conversion
][target
] = create_fs(st
, true, target
, conversion
);
778 return st
->pbo
.download_fs
[conversion
][target
];
782 st_init_pbo_helpers(struct st_context
*st
)
784 struct pipe_context
*pipe
= st
->pipe
;
785 struct pipe_screen
*screen
= pipe
->screen
;
787 st
->pbo
.upload_enabled
=
788 screen
->get_param(screen
, PIPE_CAP_TEXTURE_BUFFER_OBJECTS
) &&
789 screen
->get_param(screen
, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT
) >= 1 &&
790 screen
->get_shader_param(screen
, PIPE_SHADER_FRAGMENT
, PIPE_SHADER_CAP_INTEGERS
);
791 if (!st
->pbo
.upload_enabled
)
794 st
->pbo
.download_enabled
=
795 st
->pbo
.upload_enabled
&&
796 screen
->get_param(screen
, PIPE_CAP_SAMPLER_VIEW_TARGET
) &&
797 screen
->get_param(screen
, PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT
) &&
798 screen
->get_shader_param(screen
, PIPE_SHADER_FRAGMENT
,
799 PIPE_SHADER_CAP_MAX_SHADER_IMAGES
) >= 1;
802 screen
->get_param(screen
, PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY
);
804 if (screen
->get_param(screen
, PIPE_CAP_TGSI_INSTANCEID
)) {
805 if (screen
->get_param(screen
, PIPE_CAP_TGSI_VS_LAYER_VIEWPORT
)) {
806 st
->pbo
.layers
= true;
807 } else if (screen
->get_param(screen
, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES
) >= 3) {
808 st
->pbo
.layers
= true;
809 st
->pbo
.use_gs
= true;
814 memset(&st
->pbo
.upload_blend
, 0, sizeof(struct pipe_blend_state
));
815 st
->pbo
.upload_blend
.rt
[0].colormask
= PIPE_MASK_RGBA
;
817 /* Rasterizer state */
818 memset(&st
->pbo
.raster
, 0, sizeof(struct pipe_rasterizer_state
));
819 st
->pbo
.raster
.half_pixel_center
= 1;
823 st_destroy_pbo_helpers(struct st_context
*st
)
827 for (i
= 0; i
< ARRAY_SIZE(st
->pbo
.upload_fs
); ++i
) {
828 if (st
->pbo
.upload_fs
[i
]) {
829 cso_delete_fragment_shader(st
->cso_context
, st
->pbo
.upload_fs
[i
]);
830 st
->pbo
.upload_fs
[i
] = NULL
;
834 for (i
= 0; i
< ARRAY_SIZE(st
->pbo
.download_fs
); ++i
) {
835 for (unsigned j
= 0; j
< ARRAY_SIZE(st
->pbo
.download_fs
[0]); ++j
) {
836 if (st
->pbo
.download_fs
[i
][j
]) {
837 cso_delete_fragment_shader(st
->cso_context
, st
->pbo
.download_fs
[i
][j
]);
838 st
->pbo
.download_fs
[i
][j
] = NULL
;
844 cso_delete_geometry_shader(st
->cso_context
, st
->pbo
.gs
);
849 cso_delete_vertex_shader(st
->cso_context
, st
->pbo
.vs
);