/*
 * Copyright 2007 VMware, Inc.
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Common helper functions for PBO up- and downloads.
 */

#include "state_tracker/st_context.h"
#include "state_tracker/st_pbo.h"
#include "state_tracker/st_cb_bufferobjects.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "cso_cache/cso_context.h"
#include "tgsi/tgsi_ureg.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

/* Conversion to apply in the fragment shader. */
enum st_pbo_conversion {
   ST_PBO_CONVERT_NONE = 0,
   ST_PBO_CONVERT_UINT_TO_SINT,
   ST_PBO_CONVERT_SINT_TO_UINT,

   ST_NUM_PBO_CONVERSIONS
};

/* Final setup of buffer addressing information.
 *
 * buf_offset is in pixels.
 *
 * Returns false if something (e.g. alignment) prevents PBO upload/download.
 */
bool
st_pbo_addresses_setup(struct st_context *st,
                       struct pipe_resource *buf, intptr_t buf_offset,
                       struct st_pbo_addresses *addr)
{
   unsigned skip_pixels;

   /* Check alignment against texture buffer requirements. */
   {
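      /* If the start offset is not suitably aligned, back it up to an
       * aligned texel boundary and remember how many texels were skipped;
       * the xoffset constant set up below makes the shader skip them again.
       */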
      unsigned ofs = (buf_offset * addr->bytes_per_pixel) % st->ctx->Const.TextureBufferOffsetAlignment;
      if (ofs != 0) {
         if (ofs % addr->bytes_per_pixel != 0)
            return false;

         skip_pixels = ofs / addr->bytes_per_pixel;
         buf_offset -= skip_pixels;
      } else {
         skip_pixels = 0;
      }
   }

   assert(buf_offset >= 0);

   addr->buffer = buf;
   addr->first_element = buf_offset;
   addr->last_element = buf_offset + skip_pixels + addr->width - 1
      + (addr->height - 1 + (addr->depth - 1) * addr->image_height) * addr->pixels_per_row;

   if (addr->last_element - addr->first_element > st->ctx->Const.MaxTextureBufferSize - 1)
      return false;

   /* This should be ensured by Mesa before calling our callbacks */
   assert((addr->last_element + 1) * addr->bytes_per_pixel <= buf->width0);

   addr->constants.xoffset = -addr->xoffset + skip_pixels;
   addr->constants.yoffset = -addr->yoffset;
   addr->constants.stride = addr->pixels_per_row;
   addr->constants.image_size = addr->pixels_per_row * addr->image_height;
   addr->constants.layer_offset = 0;

   return true;
}

/* Validate and fill buffer addressing information based on GL pixelstore
 * attributes.
 *
 * Returns false if some aspect of the addressing (e.g. alignment) prevents
 * PBO upload/download.
 */
bool
st_pbo_addresses_pixelstore(struct st_context *st,
                            GLenum gl_target, bool skip_images,
                            const struct gl_pixelstore_attrib *store,
                            const void *pixels,
                            struct st_pbo_addresses *addr)
{
   struct pipe_resource *buf = st_buffer_object(store->BufferObj)->buffer;
   intptr_t buf_offset = (intptr_t) pixels;

   if (buf_offset % addr->bytes_per_pixel)
      return false;

   /* Convert to texels */
   buf_offset = buf_offset / addr->bytes_per_pixel;

   /* Determine image height */
   if (gl_target == GL_TEXTURE_1D_ARRAY) {
      addr->image_height = 1;
   } else {
      addr->image_height = store->ImageHeight > 0 ? store->ImageHeight : addr->height;
   }

   /* Compute the stride, taking store->Alignment into account */
   {
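      /* Worked example (a hypothetical GL_RGB/GL_UNSIGNED_BYTE transfer,
       * bytes_per_pixel = 3, default Alignment = 4): a 3-pixel row is
       * 9 bytes, padded to 12, which is a multiple of 3, so pixels_per_row
       * becomes 4.  A 2-pixel row is 6 bytes, padded to 8, which is not a
       * multiple of 3, so we return false and the caller falls back to the
       * non-PBO path.
       */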
      unsigned pixels_per_row = store->RowLength > 0 ?
         store->RowLength : addr->width;
      unsigned bytes_per_row = pixels_per_row * addr->bytes_per_pixel;
      unsigned remainder = bytes_per_row % store->Alignment;
      unsigned offset_rows;

      if (remainder > 0)
         bytes_per_row += store->Alignment - remainder;

      if (bytes_per_row % addr->bytes_per_pixel)
         return false;

      addr->pixels_per_row = bytes_per_row / addr->bytes_per_pixel;

      offset_rows = store->SkipRows;
      if (skip_images)
         offset_rows += addr->image_height * store->SkipImages;

      buf_offset += store->SkipPixels + addr->pixels_per_row * offset_rows;
   }

   if (!st_pbo_addresses_setup(st, buf, buf_offset, addr))
      return false;

   /* Support GL_PACK_INVERT_MESA */
   if (store->Invert) {
      addr->constants.xoffset += (addr->height - 1) * addr->constants.stride;
      addr->constants.stride = -addr->constants.stride;
   }

   return true;
}

/* For download from a framebuffer, we may have to invert the Y axis. The
 * setup is as follows:
 * - set viewport to inverted, so that the position sysval is correct for
 *   texel fetches
 * - this function adjusts the fragment shader's constant buffer to compute
 *   the correct destination addresses.
 */
void
st_pbo_addresses_invert_y(struct st_pbo_addresses *addr,
                          unsigned viewport_height)
{
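   /* After this adjustment, a fragment at window row y addresses the buffer
    * as if it were at row (viewport_height - 1 - y), i.e. the Y axis of the
    * destination is flipped.
    */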
   addr->constants.xoffset +=
      (viewport_height - 1 + 2 * addr->constants.yoffset) * addr->constants.stride;
   addr->constants.stride = -addr->constants.stride;
}

/* Setup all vertex pipeline state, rasterizer state, and fragment shader
 * constants, and issue the draw call for PBO upload/download.
 *
 * The caller is responsible for saving and restoring state, as well as for
 * setting other fragment shader state (fragment shader, samplers), and
 * framebuffer/viewport/DSA/blend state.
 */
bool
st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
            unsigned surface_width, unsigned surface_height)
{
   struct cso_context *cso = st->cso_context;

   /* Setup vertex and geometry shaders */
   if (!st->pbo.vs) {
      st->pbo.vs = st_pbo_create_vs(st);
      if (!st->pbo.vs)
         return false;
   }

   if (addr->depth != 1 && st->pbo.use_gs && !st->pbo.gs) {
      st->pbo.gs = st_pbo_create_gs(st);
      if (!st->pbo.gs)
         return false;
   }

   cso_set_vertex_shader_handle(cso, st->pbo.vs);

   cso_set_geometry_shader_handle(cso, addr->depth != 1 ? st->pbo.gs : NULL);

   cso_set_tessctrl_shader_handle(cso, NULL);

   cso_set_tesseval_shader_handle(cso, NULL);

   /* Upload vertices */
   {
      struct pipe_vertex_buffer vbo = {0};
      struct pipe_vertex_element velem;

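      /* Convert the transfer rectangle from pixels to normalized device
       * coordinates for the four corners of a triangle strip quad.
       */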
      float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
      float y0 = (float) addr->yoffset / surface_height * 2.0f - 1.0f;
      float x1 = (float) (addr->xoffset + addr->width) / surface_width * 2.0f - 1.0f;
      float y1 = (float) (addr->yoffset + addr->height) / surface_height * 2.0f - 1.0f;

      float *verts = NULL;

      vbo.stride = 2 * sizeof(float);

      u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
                     &vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
      if (!verts)
         return false;

      verts[0] = x0;
      verts[1] = y0;
      verts[2] = x0;
      verts[3] = y1;
      verts[4] = x1;
      verts[5] = y0;
      verts[6] = x1;
      verts[7] = y1;

      u_upload_unmap(st->pipe->stream_uploader);

      velem.src_offset = 0;
      velem.instance_divisor = 0;
      velem.vertex_buffer_index = cso_get_aux_vertex_buffer_slot(cso);
      velem.src_format = PIPE_FORMAT_R32G32_FLOAT;

      cso_set_vertex_elements(cso, 1, &velem);

      cso_set_vertex_buffers(cso, velem.vertex_buffer_index, 1, &vbo);

      pipe_resource_reference(&vbo.buffer.resource, NULL);
   }

   /* Upload constants */
   {
      struct pipe_constant_buffer cb;

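      /* Drivers that cannot read user constant buffers get the addressing
       * constants copied into a GPU buffer through the constant uploader;
       * otherwise we pass a pointer to addr->constants directly.
       */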
      if (!st->has_user_constbuf) {
         cb.buffer = NULL;
         cb.user_buffer = NULL;
         u_upload_data(st->pipe->const_uploader, 0, sizeof(addr->constants),
                       st->ctx->Const.UniformBufferOffsetAlignment,
                       &addr->constants, &cb.buffer_offset, &cb.buffer);
         if (!cb.buffer)
            return false;

         u_upload_unmap(st->pipe->const_uploader);
      } else {
         cb.buffer = NULL;
         cb.user_buffer = &addr->constants;
         cb.buffer_offset = 0;
      }
      cb.buffer_size = sizeof(addr->constants);

      cso_set_constant_buffer(cso, PIPE_SHADER_FRAGMENT, 0, &cb);

      pipe_resource_reference(&cb.buffer, NULL);
   }

   /* Rasterizer state */
   cso_set_rasterizer(cso, &st->pbo.raster);

   /* Disable stream output */
   cso_set_stream_outputs(cso, 0, NULL, 0);

   if (addr->depth == 1) {
      cso_draw_arrays(cso, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
   } else {
      cso_draw_arrays_instanced(cso, PIPE_PRIM_TRIANGLE_STRIP,
                                0, 4, 0, addr->depth);
   }

   return true;
}

void *
st_pbo_create_vs(struct st_context *st)
{
   struct ureg_program *ureg;
   struct ureg_src in_pos;
   struct ureg_src in_instanceid;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   in_pos = ureg_DECL_vs_input(ureg, TGSI_SEMANTIC_POSITION);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);

   if (st->pbo.layers) {
      in_instanceid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);

      if (!st->pbo.use_gs)
         out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
   }

   /* out_pos = in_pos */
   ureg_MOV(ureg, out_pos, in_pos);

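   /* For layered targets, each instance of the quad renders one layer; the
    * instance ID is written to gl_Layer directly, or routed through the
    * position's Z component when a geometry shader has to do it instead.
    */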
   if (st->pbo.layers) {
      if (st->pbo.use_gs) {
         /* out_pos.z = i2f(gl_InstanceID) */
         ureg_I2F(ureg, ureg_writemask(out_pos, TGSI_WRITEMASK_Z),
                  ureg_scalar(in_instanceid, TGSI_SWIZZLE_X));
      } else {
         /* out_layer = gl_InstanceID */
         ureg_MOV(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
                  ureg_scalar(in_instanceid, TGSI_SWIZZLE_X));
      }
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

void *
st_pbo_create_gs(struct st_context *st)
{
   static const int zero = 0;
   struct ureg_program *ureg;
   struct ureg_dst out_pos;
   struct ureg_dst out_layer;
   struct ureg_src in_pos;
   struct ureg_src imm;
   unsigned i;

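   /* Pass-through geometry shader for drivers that cannot export gl_Layer
    * from the vertex shader: the vertex shader stored the instance ID in
    * position.z, and this shader turns it back into the per-primitive layer
    * output.
    */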
   ureg = ureg_create(PIPE_SHADER_GEOMETRY);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM, PIPE_PRIM_TRIANGLES);
   ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM, PIPE_PRIM_TRIANGLE_STRIP);
   ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES, 3);

   out_pos = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   out_layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

   in_pos = ureg_DECL_input(ureg, TGSI_SEMANTIC_POSITION, 0, 0, 1);

   imm = ureg_DECL_immediate_int(ureg, &zero, 1);

   for (i = 0; i < 3; ++i) {
      struct ureg_src in_pos_vertex = ureg_src_dimension(in_pos, i);

      /* out_pos = in_pos[i] */
      ureg_MOV(ureg, out_pos, in_pos_vertex);

      /* out_layer.x = f2i(in_pos[i].z) */
      ureg_F2I(ureg, ureg_writemask(out_layer, TGSI_WRITEMASK_X),
               ureg_scalar(in_pos_vertex, TGSI_SWIZZLE_Z));

      ureg_EMIT(ureg, ureg_scalar(imm, TGSI_SWIZZLE_X));
   }

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, st->pipe);
}

static void
build_conversion(struct ureg_program *ureg, const struct ureg_dst *temp,
                 enum st_pbo_conversion conversion)
{
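   /* Clamp the fetched value to the range representable by the destination
    * type: negative values become 0 for signed-to-unsigned, and values above
    * INT_MAX become INT_MAX for unsigned-to-signed.
    */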
   switch (conversion) {
   case ST_PBO_CONVERT_SINT_TO_UINT:
      ureg_IMAX(ureg, *temp, ureg_src(*temp), ureg_imm1i(ureg, 0));
      break;
   case ST_PBO_CONVERT_UINT_TO_SINT:
      ureg_UMIN(ureg, *temp, ureg_src(*temp), ureg_imm1u(ureg, (1u << 31) - 1));
      break;
   default:
      /* no-op */
      break;
   }
}

static void *
create_fs(struct st_context *st, bool download, enum pipe_texture_target target,
          enum st_pbo_conversion conversion)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct ureg_program *ureg;
   bool have_layer;
   struct ureg_dst out;
   struct ureg_src sampler;
   struct ureg_src pos;
   struct ureg_src layer;
   struct ureg_src const0;
   struct ureg_src const1;
   struct ureg_dst temp0;

   have_layer =
      st->pbo.layers &&
      (!download || target == PIPE_TEXTURE_1D_ARRAY
                 || target == PIPE_TEXTURE_2D_ARRAY
                 || target == PIPE_TEXTURE_3D
                 || target == PIPE_TEXTURE_CUBE
                 || target == PIPE_TEXTURE_CUBE_ARRAY);

   ureg = ureg_create(PIPE_SHADER_FRAGMENT);
   if (!ureg)
      return NULL;

   if (!download) {
      out = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
   } else {
      struct ureg_src image;

      /* writeonly images do not require an explicitly given format. */
      image = ureg_DECL_image(ureg, 0, TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE,
                              true, false);
      out = ureg_dst(image);
   }

   sampler = ureg_DECL_sampler(ureg, 0);
   if (screen->get_param(screen, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL)) {
      pos = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_POSITION, 0);
   } else {
      pos = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_POSITION, 0,
                               TGSI_INTERPOLATE_LINEAR);
   }
   if (have_layer) {
      layer = ureg_DECL_fs_input(ureg, TGSI_SEMANTIC_LAYER, 0,
                                 TGSI_INTERPOLATE_CONSTANT);
   }
   const0 = ureg_DECL_constant(ureg, 0);
   const1 = ureg_DECL_constant(ureg, 1);
   temp0 = ureg_DECL_temporary(ureg);

   /* Note: const0 = [ -xoffset + skip_pixels, -yoffset, stride, image_size ] */
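   /* The code below computes the linear texel index of each fragment as
    *    (x + const0.x) + (y + const0.y) * const0.z  [+ layer * const0.w]
    */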

   /* temp0.xy = f2i(pos.xy) */
   ureg_F2I(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
            ureg_swizzle(pos,
                         TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                         TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.xy = temp0.xy + const0.xy */
   ureg_UADD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_XY),
             ureg_swizzle(ureg_src(temp0),
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y),
             ureg_swizzle(const0,
                          TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y,
                          TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Y));

   /* temp0.x = const0.z * temp0.y + temp0.x */
   ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
             ureg_scalar(const0, TGSI_SWIZZLE_Z),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_Y),
             ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));

   if (have_layer) {
      /* temp0.x = const0.w * layer + temp0.x */
      ureg_UMAD(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_X),
                ureg_scalar(const0, TGSI_SWIZZLE_W),
                ureg_scalar(layer, TGSI_SWIZZLE_X),
                ureg_scalar(ureg_src(temp0), TGSI_SWIZZLE_X));
   }

   /* temp0.w = 0 */
   ureg_MOV(ureg, ureg_writemask(temp0, TGSI_WRITEMASK_W), ureg_imm1u(ureg, 0));

   if (download) {
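      /* Download path: fetch the texel from the bound sampler view of the
       * source texture at the fragment's window coordinate (plus layer) and
       * store it into the destination buffer image at the address computed
       * in temp0.
       */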
      struct ureg_dst temp1;
      struct ureg_src op[2];

      temp1 = ureg_DECL_temporary(ureg);

      /* temp1.xy = f2i(pos.xy) */
      ureg_F2I(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_XY), pos);

      /* temp1.zw = 0 */
      ureg_MOV(ureg, ureg_writemask(temp1, TGSI_WRITEMASK_ZW), ureg_imm1u(ureg, 0));

      if (have_layer) {
         struct ureg_dst temp1_layer =
            ureg_writemask(temp1, target == PIPE_TEXTURE_1D_ARRAY ? TGSI_WRITEMASK_Y
                                                                  : TGSI_WRITEMASK_Z);

         /* temp1.y/z = layer */
         ureg_MOV(ureg, temp1_layer, ureg_scalar(layer, TGSI_SWIZZLE_X));

         if (target == PIPE_TEXTURE_3D) {
            /* temp1.z += layer_offset */
            ureg_UADD(ureg, temp1_layer,
                      ureg_scalar(ureg_src(temp1), TGSI_SWIZZLE_Z),
                      ureg_scalar(const1, TGSI_SWIZZLE_X));
         }
      }

      /* temp1 = txf(sampler, temp1) */
      ureg_TXF(ureg, temp1, util_pipe_tex_to_tgsi_tex(target, 1),
               ureg_src(temp1), sampler);

      build_conversion(ureg, &temp1, conversion);

      /* store(out, temp0, temp1) */
      op[0] = ureg_src(temp0);
      op[1] = ureg_src(temp1);
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &out, 1, op, 2, 0,
                       TGSI_TEXTURE_BUFFER, PIPE_FORMAT_NONE);

      ureg_release_temporary(ureg, temp1);
   } else {
      /* temp0 = txf(sampler, temp0.x) */
      ureg_TXF(ureg, temp0, TGSI_TEXTURE_BUFFER, ureg_src(temp0), sampler);

      build_conversion(ureg, &temp0, conversion);

      ureg_MOV(ureg, out, ureg_src(temp0));
   }

   ureg_release_temporary(ureg, temp0);

   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, pipe);
}

static enum st_pbo_conversion
get_pbo_conversion(enum pipe_format src_format, enum pipe_format dst_format)
{
   if (util_format_is_pure_uint(src_format)) {
      if (util_format_is_pure_sint(dst_format))
         return ST_PBO_CONVERT_UINT_TO_SINT;
   } else if (util_format_is_pure_sint(src_format)) {
      if (util_format_is_pure_uint(dst_format))
         return ST_PBO_CONVERT_SINT_TO_UINT;
   }

   return ST_PBO_CONVERT_NONE;
}

void *
st_pbo_get_upload_fs(struct st_context *st,
                     enum pipe_format src_format,
                     enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.upload_fs) == ST_NUM_PBO_CONVERSIONS);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.upload_fs[conversion])
      st->pbo.upload_fs[conversion] = create_fs(st, false, 0, conversion);

   return st->pbo.upload_fs[conversion];
}

void *
st_pbo_get_download_fs(struct st_context *st, enum pipe_texture_target target,
                       enum pipe_format src_format,
                       enum pipe_format dst_format)
{
   STATIC_ASSERT(ARRAY_SIZE(st->pbo.download_fs) == ST_NUM_PBO_CONVERSIONS);
   assert(target < PIPE_MAX_TEXTURE_TYPES);

   enum st_pbo_conversion conversion = get_pbo_conversion(src_format, dst_format);

   if (!st->pbo.download_fs[conversion][target])
      st->pbo.download_fs[conversion][target] = create_fs(st, true, target, conversion);

   return st->pbo.download_fs[conversion][target];
}

void
st_init_pbo_helpers(struct st_context *st)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;

   st->pbo.upload_enabled =
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OBJECTS) &&
      screen->get_param(screen, PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT) >= 1 &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT, PIPE_SHADER_CAP_INTEGERS);
   if (!st->pbo.upload_enabled)
      return;

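   /* Downloads additionally render without a framebuffer attachment and
    * write the result through a writable shader image bound to the
    * destination buffer, so those caps are required on top of the upload
    * requirements.
    */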
   st->pbo.download_enabled =
      st->pbo.upload_enabled &&
      screen->get_param(screen, PIPE_CAP_SAMPLER_VIEW_TARGET) &&
      screen->get_param(screen, PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT) &&
      screen->get_shader_param(screen, PIPE_SHADER_FRAGMENT,
                               PIPE_SHADER_CAP_MAX_SHADER_IMAGES) >= 1;

   st->pbo.rgba_only =
      screen->get_param(screen, PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY);

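   /* Layered (array/3D) transfers use instanced rendering, one instance per
    * layer, with the layer selected either directly by the vertex shader or
    * by a small pass-through geometry shader.
    */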
   if (screen->get_param(screen, PIPE_CAP_TGSI_INSTANCEID)) {
      if (screen->get_param(screen, PIPE_CAP_TGSI_VS_LAYER_VIEWPORT)) {
         st->pbo.layers = true;
      } else if (screen->get_param(screen, PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES) >= 3) {
         st->pbo.layers = true;
         st->pbo.use_gs = true;
      }
   }

   /* Blend state */
   memset(&st->pbo.upload_blend, 0, sizeof(struct pipe_blend_state));
   st->pbo.upload_blend.rt[0].colormask = PIPE_MASK_RGBA;

   /* Rasterizer state */
   memset(&st->pbo.raster, 0, sizeof(struct pipe_rasterizer_state));
   st->pbo.raster.half_pixel_center = 1;
}

void
st_destroy_pbo_helpers(struct st_context *st)
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(st->pbo.upload_fs); ++i) {
      if (st->pbo.upload_fs[i]) {
         cso_delete_fragment_shader(st->cso_context, st->pbo.upload_fs[i]);
         st->pbo.upload_fs[i] = NULL;
      }
   }

   for (i = 0; i < ARRAY_SIZE(st->pbo.download_fs); ++i) {
      for (unsigned j = 0; j < ARRAY_SIZE(st->pbo.download_fs[0]); ++j) {
         if (st->pbo.download_fs[i][j]) {
            cso_delete_fragment_shader(st->cso_context, st->pbo.download_fs[i][j]);
            st->pbo.download_fs[i][j] = NULL;
         }
      }
   }

   if (st->pbo.gs) {
      cso_delete_geometry_shader(st->cso_context, st->pbo.gs);
      st->pbo.gs = NULL;
   }

   if (st->pbo.vs) {
      cso_delete_vertex_shader(st->cso_context, st->pbo.vs);
      st->pbo.vs = NULL;
   }
}