/* r600g: add initial cube map array support (v2)
 * src/gallium/drivers/r600/r600_pipe.h */
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 */
26 #ifndef R600_PIPE_H
27 #define R600_PIPE_H
28
29 #include "util/u_blitter.h"
30 #include "util/u_slab.h"
31 #include "r600.h"
32 #include "r600_llvm.h"
33 #include "r600_public.h"
34 #include "r600_resource.h"
35 #include "evergreen_compute.h"
36
37 #define R600_NUM_ATOMS 36
38
39 #define R600_MAX_USER_CONST_BUFFERS 1
40 #define R600_MAX_DRIVER_CONST_BUFFERS 2
41 #define R600_MAX_CONST_BUFFERS (R600_MAX_USER_CONST_BUFFERS + R600_MAX_DRIVER_CONST_BUFFERS)
42
43 /* start driver buffers after user buffers */
44 #define R600_UCP_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS)
45 #define R600_TXQ_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 1)
46
47 #define R600_MAX_CONST_BUFFER_SIZE 4096
48
49 #ifdef PIPE_ARCH_BIG_ENDIAN
50 #define R600_BIG_ENDIAN 1
51 #else
52 #define R600_BIG_ENDIAN 0
53 #endif
54
55 struct r600_bytecode;
56 struct r600_shader_key;
57
58 /* This encapsulates a state or an operation which can emitted into the GPU
59 * command stream. It's not limited to states only, it can be used for anything
60 * that wants to write commands into the CS (e.g. cache flushes). */
struct r600_atom {
	/* Callback that writes this atom's commands into the CS. */
	void (*emit)(struct r600_context *ctx, struct r600_atom *state);
	unsigned id;		/* binding slot index (see r600_context::atoms) */
	unsigned num_dw;	/* dwords emit() writes; used for CS space estimation */
	bool dirty;		/* set to request (re-)emission; cleared by r600_emit_atom */
};
67
68 /* This is an atom containing GPU commands that never change.
69 * This is supposed to be copied directly into the CS. */
struct r600_command_buffer {
	uint32_t *buf;		/* dword storage */
	unsigned num_dw;	/* dwords currently stored */
	unsigned max_num_dw;	/* capacity of buf in dwords */
	unsigned pkt_flags;	/* OR'd into PKT3 headers (e.g. RADEON_CP_PACKET3_COMPUTE_MODE) */
};
76
/* Miscellaneous DB (depth/stencil block) register state. */
struct r600_db_misc_state {
	struct r600_atom atom;
	bool occlusion_query_enabled;
	bool flush_depthstencil_through_cb;
	bool flush_depthstencil_in_place;
	bool copy_depth, copy_stencil;
	unsigned copy_sample;
	unsigned log_samples;
	unsigned db_shader_control;
};
87
/* Miscellaneous CB (color block) state, derived from several other states. */
struct r600_cb_misc_state {
	struct r600_atom atom;
	unsigned cb_color_control; /* this comes from blend state */
	unsigned blend_colormask; /* 8*4 bits for 8 RGBA colorbuffers */
	unsigned nr_cbufs;
	unsigned nr_ps_color_outputs;
	bool multiwrite;
	bool dual_src_blend;
};
97
/* Clipping state combined from the rasterizer and the vertex shader. */
struct r600_clip_misc_state {
	struct r600_atom atom;
	unsigned pa_cl_clip_cntl; /* from rasterizer */
	unsigned pa_cl_vs_out_cntl; /* from vertex shader */
	unsigned clip_plane_enable; /* from rasterizer */
	unsigned clip_dist_write; /* from vertex shader */
};
105
/* Alpha-test state, derived from the DSA CSO and the framebuffer. */
struct r600_alphatest_state {
	struct r600_atom atom;
	unsigned sx_alpha_test_control; /* this comes from dsa state */
	unsigned sx_alpha_ref; /* this comes from dsa state */
	bool bypass;
	bool cb0_export_16bpc; /* from set_framebuffer_state */
};
113
/* VGT primitive-restart register state. */
struct r600_vgt_state {
	struct r600_atom atom;
	uint32_t vgt_multi_prim_ib_reset_en;
	uint32_t vgt_multi_prim_ib_reset_indx;
};
119
/* VGT index-offset register state. */
struct r600_vgt2_state {
	struct r600_atom atom;
	uint32_t vgt_indx_offset;
};
124
/* Constant blend color (atom wrapper around pipe_blend_color). */
struct r600_blend_color {
	struct r600_atom atom;
	struct pipe_blend_color state;
};
129
/* User clip planes (atom wrapper around pipe_clip_state). */
struct r600_clip_state {
	struct r600_atom atom;
	struct pipe_clip_state state;
};
134
/* Currently bound compute shader and selected kernel. */
struct r600_cs_shader_state {
	struct r600_atom atom;
	unsigned kernel_index;
	struct r600_pipe_compute *shader;
};
140
/* Framebuffer binding plus flags derived from the bound surfaces. */
struct r600_framebuffer {
	struct r600_atom atom;
	struct pipe_framebuffer_state state;
	unsigned compressed_cb_mask;	/* colorbuffers with compressed (CMASK/FMASK) data */
	unsigned nr_samples;
	bool export_16bpc;
	bool cb0_is_integer;
	bool is_msaa_resolve;
};
150
/* MSAA sample mask. */
struct r600_sample_mask {
	struct r600_atom atom;
	uint16_t sample_mask; /* there are only 8 bits on EG, 16 bits on Cayman */
};
155
/* GPR resource-management config register (adjusted by r600_adjust_gprs). */
struct r600_config_state {
	struct r600_atom atom;
	unsigned sq_gpr_resource_mgmt_1;
};
160
/* Per-face (front/back) stencil reference and mask values. */
struct r600_stencil_ref
{
	ubyte ref_value[2];
	ubyte valuemask[2];
	ubyte writemask[2];
};
167
/* Stencil-ref atom: hardware values plus the originating pipe state. */
struct r600_stencil_ref_state {
	struct r600_atom atom;
	struct r600_stencil_ref state;
	struct pipe_stencil_ref pipe_state;
};
173
/* Viewport (atom wrapper around pipe_viewport_state). */
struct r600_viewport_state {
	struct r600_atom atom;
	struct pipe_viewport_state state;
};
178
179 struct compute_memory_pool;
180 void compute_memory_pool_delete(struct compute_memory_pool* pool);
181 struct compute_memory_pool* compute_memory_pool_new(
182 struct r600_screen *rscreen);
183
/* Screen-wide fence bookkeeping: one shared BO holds the fence values. */
struct r600_pipe_fences {
	struct r600_resource *bo;
	unsigned *data;		/* CPU mapping of the fence values */
	unsigned next_index;
	/* linked list of preallocated blocks */
	struct list_head blocks;
	/* linked list of freed fences */
	struct list_head pool;
	pipe_mutex mutex;
};
194
/* Level of MSAA texture-fetch support provided by the hardware. */
enum r600_msaa_texture_mode {
	/* If the hw can fetch the first sample only (no decompression available).
	 * This means MSAA texturing is not fully implemented. */
	MSAA_TEXTURE_SAMPLE_ZERO,

	/* If the hw can fetch decompressed MSAA textures.
	 * Supported families: R600, R700, Evergreen.
	 * Cayman cannot use this, because it cannot do the decompression. */
	MSAA_TEXTURE_DECOMPRESSED,

	/* If the hw can fetch compressed MSAA textures, which means shaders can
	 * read resolved FMASK. This yields the best performance.
	 * Supported families: Evergreen, Cayman. */
	MSAA_TEXTURE_COMPRESSED
};
210
/* Driver screen: wraps pipe_screen with chip info and screen-wide pools.
 * One instance per winsys connection, shared by all contexts. */
struct r600_screen {
	struct pipe_screen screen;
	struct radeon_winsys *ws;
	unsigned family;
	enum chip_class chip_class;
	struct radeon_info info;
	bool has_streamout;
	bool has_msaa;
	enum r600_msaa_texture_mode msaa_texture_support;
	struct r600_tiling_info tiling_info;
	struct r600_pipe_fences fences;

	/*for compute global memory binding, we allocate stuff here, instead of
	 * buffers.
	 * XXX: Not sure if this is the best place for global_pool. Also,
	 * it's not thread safe, so it won't work with multiple contexts. */
	struct compute_memory_pool *global_pool;
};
229
/* Sampler view plus the precomputed hardware resource descriptor words. */
struct r600_pipe_sampler_view {
	struct pipe_sampler_view base;
	struct r600_resource *tex_resource;
	uint32_t tex_resource_words[8];	/* hardware texture resource descriptor */
	bool skip_mip_address_reloc;
};
236
/* Rasterizer CSO: prebuilt commands plus fields other states depend on. */
struct r600_rasterizer_state {
	struct r600_command_buffer buffer;
	boolean flatshade;
	boolean two_side;
	unsigned sprite_coord_enable;
	unsigned clip_plane_enable;
	unsigned pa_sc_line_stipple;
	unsigned pa_cl_clip_cntl;
	float offset_units;
	float offset_scale;
	bool offset_enable;
	bool scissor_enable;
	bool multisample_enable;
};
251
/* Polygon-offset atom; emitted values depend on the Z/S buffer format. */
struct r600_poly_offset_state {
	struct r600_atom atom;
	enum pipe_format zs_format;
	float offset_units;
	float offset_scale;
};
258
/* Blend CSO with an alternate command buffer for the blending-disabled case. */
struct r600_blend_state {
	struct r600_command_buffer buffer;
	struct r600_command_buffer buffer_no_blend;
	unsigned cb_target_mask;
	unsigned cb_color_control;
	unsigned cb_color_control_no_blend;
	bool dual_src_blend;
	bool alpha_to_one;
};
268
/* Depth-stencil-alpha CSO plus values consumed by other atoms
 * (stencil_ref, alphatest_state). */
struct r600_dsa_state {
	struct r600_command_buffer buffer;
	unsigned alpha_ref;
	ubyte valuemask[2];
	ubyte writemask[2];
	unsigned sx_alpha_test_control;
};
276
277 struct r600_pipe_shader;
278
/* Shader selector: owns the TGSI tokens and the compiled shader variant(s). */
struct r600_pipe_shader_selector {
	struct r600_pipe_shader *current;	/* the variant currently in use */

	struct tgsi_token *tokens;
	struct pipe_stream_output_info so;

	unsigned num_shaders;	/* number of compiled variants */

	/* PIPE_SHADER_[VERTEX|FRAGMENT|...] */
	unsigned type;

	unsigned nr_ps_max_color_exports;
};
292
/* Sampler CSO: hardware sampler words plus border-color data. */
struct r600_pipe_sampler_state {
	uint32_t tex_sampler_words[3];
	union pipe_color_union border_color;
	bool border_color_use;
	bool seamless_cube_map;
};
299
300 /* needed for blitter save */
301 #define NUM_TEX_UNITS 16
302
/* Seamless cube-map filtering toggle. */
struct r600_seamless_cube_map {
	struct r600_atom atom;
	bool enabled;
};
307
/* Sampler views bound to one shader stage, with dirty/compression tracking.
 * All masks are bitfields indexed by texture unit. */
struct r600_samplerview_state {
	struct r600_atom atom;
	struct r600_pipe_sampler_view *views[NUM_TEX_UNITS];
	uint32_t enabled_mask;
	uint32_t dirty_mask;
	uint32_t compressed_depthtex_mask; /* which textures are depth */
	uint32_t compressed_colortex_mask;
	boolean dirty_txq_constants;	/* TXQ workaround constants need re-upload */
};
317
/* Sampler states bound to one shader stage, with dirty tracking. */
struct r600_sampler_states {
	struct r600_atom atom;
	struct r600_pipe_sampler_state *states[NUM_TEX_UNITS];
	uint32_t enabled_mask;
	uint32_t dirty_mask;
	uint32_t has_bordercolor_mask; /* which states contain the border color */
};
325
/* All texture bindings (views + sampler states) for one shader stage. */
struct r600_textures_info {
	struct r600_samplerview_state views;
	struct r600_sampler_states states;
	bool is_array_sampler[NUM_TEX_UNITS];

	/* cube array txq workaround */
	uint32_t *txq_constants;
};
334
/* One reference-counted fence slot inside r600_pipe_fences::bo. */
struct r600_fence {
	struct pipe_reference reference;
	unsigned index; /* in the shared bo */
	struct r600_resource *sleep_bo;
	struct list_head head;
};
341
342 #define FENCE_BLOCK_SIZE 16
343
/* A preallocated block of fences, linked into r600_pipe_fences::blocks. */
struct r600_fence_block {
	struct r600_fence fences[FENCE_BLOCK_SIZE];
	struct list_head head;
};
348
349 #define R600_CONSTANT_ARRAY_SIZE 256
350 #define R600_RESOURCE_ARRAY_SIZE 160
351
/* Constant-buffer bindings for one shader stage; masks are per-slot bitfields. */
struct r600_constbuf_state
{
	struct r600_atom atom;
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
	uint32_t dirty_mask;
};
359
/* Vertex-buffer bindings (used for both fetch and compute shaders). */
struct r600_vertexbuf_state
{
	struct r600_atom atom;
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	uint32_t enabled_mask; /* non-NULL buffers */
	uint32_t dirty_mask;
};
367
368 /* CSO (constant state object, in other words, immutable state). */
struct r600_cso_state
{
	struct r600_atom atom;
	void *cso;			/* e.g. r600_blend_state */
	struct r600_command_buffer *cb;	/* prebuilt commands for this CSO */
};
375
/* Scissor state; the enable flag matters only on R6xx hardware. */
struct r600_scissor_state
{
	struct r600_atom atom;
	struct pipe_scissor_state scissor;
	bool enable; /* r6xx only */
};
382
/* Per-context driver state: one instance per pipe_context. Holds the CS
 * being built, all state-binding atoms, shader bindings, query and
 * streamout bookkeeping. */
struct r600_context {
	struct pipe_context context;
	struct r600_screen *screen;
	struct radeon_winsys *ws;
	struct radeon_winsys_cs *cs;	/* the command stream being built */
	struct blitter_context *blitter;
	struct u_upload_mgr *uploader;
	struct util_slab_mempool pool_transfers;

	/* Hardware info. */
	enum radeon_family family;
	enum chip_class chip_class;
	boolean has_vertex_cache;
	boolean keep_tiling_flags;
	unsigned default_ps_gprs, default_vs_gprs;
	unsigned r6xx_num_clause_temp_gprs;
	unsigned backend_mask;
	unsigned max_db; /* for OQ */

	/* Miscellaneous state objects. */
	void *custom_dsa_flush;
	void *custom_blend_resolve;
	void *custom_blend_decompress;
	void *custom_blend_fmask_decompress;
	/* With rasterizer discard, there doesn't have to be a pixel shader.
	 * In that case, we bind this one: */
	void *dummy_pixel_shader;
	/* These dummy CMASK and FMASK buffers are used to get around the R6xx hardware
	 * bug where valid CMASK and FMASK are required to be present to avoid
	 * a hardlock in certain operations but aren't actually used
	 * for anything useful. */
	struct r600_resource *dummy_fmask;
	struct r600_resource *dummy_cmask;

	/* State binding slots are here. */
	struct r600_atom *atoms[R600_NUM_ATOMS];
	/* States for CS initialization. */
	struct r600_command_buffer start_cs_cmd; /* invariant state mostly */
	/** Compute specific registers initializations. The start_cs_cmd atom
	 * must be emitted before start_compute_cs_cmd. */
	struct r600_command_buffer start_compute_cs_cmd;
	/* Register states. */
	struct r600_alphatest_state alphatest_state;
	struct r600_cso_state blend_state;
	struct r600_blend_color blend_color;
	struct r600_cb_misc_state cb_misc_state;
	struct r600_clip_misc_state clip_misc_state;
	struct r600_clip_state clip_state;
	struct r600_db_misc_state db_misc_state;
	struct r600_cso_state dsa_state;
	struct r600_framebuffer framebuffer;
	struct r600_poly_offset_state poly_offset_state;
	struct r600_cso_state rasterizer_state;
	struct r600_sample_mask sample_mask;
	struct r600_scissor_state scissor;
	struct r600_seamless_cube_map seamless_cube_map;
	struct r600_config_state config_state;
	struct r600_stencil_ref_state stencil_ref;
	struct r600_vgt_state vgt_state;
	struct r600_vgt2_state vgt2_state;
	struct r600_viewport_state viewport;
	/* Shaders and shader resources. */
	struct r600_cso_state vertex_fetch_shader;
	struct r600_cs_shader_state cs_shader_state;
	struct r600_constbuf_state constbuf_state[PIPE_SHADER_TYPES];
	struct r600_textures_info samplers[PIPE_SHADER_TYPES];
	/** Vertex buffers for fetch shaders */
	struct r600_vertexbuf_state vertex_buffer_state;
	/** Vertex buffers for compute shaders */
	struct r600_vertexbuf_state cs_vertex_buffer_state;

	/* Additional context states. */
	unsigned flags;
	unsigned compute_cb_target_mask;
	struct r600_pipe_shader_selector *ps_shader;
	struct r600_pipe_shader_selector *vs_shader;
	struct r600_rasterizer_state *rasterizer;
	bool alpha_to_one;
	bool force_blend_disable;
	boolean dual_src_blend;

	/* Index buffer. */
	struct pipe_index_buffer index_buffer;

	/* Last draw state (-1 = unset). */
	int last_primitive_type; /* Last primitive type used in draw_vbo. */
	int last_start_instance;

	/* Queries. */
	/* The list of active queries. Only one query of each type can be active. */
	int num_occlusion_queries;
	/* Manage queries in two separate groups:
	 * The timer ones and the others (streamout, occlusion).
	 *
	 * We do this because we should only suspend non-timer queries for u_blitter,
	 * and later if the non-timer queries are suspended, the context flush should
	 * only suspend and resume the timer queries. */
	struct list_head active_timer_queries;
	unsigned num_cs_dw_timer_queries_suspend;
	struct list_head active_nontimer_queries;
	unsigned num_cs_dw_nontimer_queries_suspend;
	/* Flags if queries have been suspended. */
	bool timer_queries_suspended;
	bool nontimer_queries_suspended;

	/* Render condition. */
	struct pipe_query *current_render_cond;
	unsigned current_render_cond_mode;
	boolean predicate_drawing;

	/* Streamout state. */
	unsigned num_cs_dw_streamout_end;
	unsigned num_so_targets;
	struct r600_so_target *so_targets[PIPE_MAX_SO_BUFFERS];
	boolean streamout_start;
	unsigned streamout_append_bitmask;
	bool streamout_suspended;

	/* Deprecated state management. */
	struct r600_range *range;
	unsigned nblocks;
	struct r600_block **blocks;
	struct list_head dirty;
	struct list_head enable_list;
	unsigned pm4_dirty_cdwords;
};
509
510 static INLINE void r600_emit_command_buffer(struct radeon_winsys_cs *cs,
511 struct r600_command_buffer *cb)
512 {
513 assert(cs->cdw + cb->num_dw <= RADEON_MAX_CMDBUF_DWORDS);
514 memcpy(cs->buf + cs->cdw, cb->buf, 4 * cb->num_dw);
515 cs->cdw += cb->num_dw;
516 }
517
518 static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
519 {
520 atom->emit(rctx, atom);
521 atom->dirty = false;
522 }
523
524 static INLINE void r600_set_cso_state(struct r600_cso_state *state, void *cso)
525 {
526 state->cso = cso;
527 state->atom.dirty = cso != NULL;
528 }
529
530 static INLINE void r600_set_cso_state_with_cb(struct r600_cso_state *state, void *cso,
531 struct r600_command_buffer *cb)
532 {
533 state->cb = cb;
534 state->atom.num_dw = cb->num_dw;
535 r600_set_cso_state(state, cso);
536 }
537
538 /* evergreen_state.c */
539 struct pipe_sampler_view *
540 evergreen_create_sampler_view_custom(struct pipe_context *ctx,
541 struct pipe_resource *texture,
542 const struct pipe_sampler_view *state,
543 unsigned width0, unsigned height0);
544 void evergreen_init_common_regs(struct r600_command_buffer *cb,
545 enum chip_class ctx_chip_class,
546 enum radeon_family ctx_family,
547 int ctx_drm_minor);
548 void cayman_init_common_regs(struct r600_command_buffer *cb,
549 enum chip_class ctx_chip_class,
550 enum radeon_family ctx_family,
551 int ctx_drm_minor);
552
553 void evergreen_init_state_functions(struct r600_context *rctx);
554 void evergreen_init_atom_start_cs(struct r600_context *rctx);
555 void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
556 void evergreen_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
557 void *evergreen_create_db_flush_dsa(struct r600_context *rctx);
558 void *evergreen_create_resolve_blend(struct r600_context *rctx);
559 void *evergreen_create_decompress_blend(struct r600_context *rctx);
560 void *evergreen_create_fmask_decompress_blend(struct r600_context *rctx);
561 boolean evergreen_is_format_supported(struct pipe_screen *screen,
562 enum pipe_format format,
563 enum pipe_texture_target target,
564 unsigned sample_count,
565 unsigned usage);
566 void evergreen_init_color_surface(struct r600_context *rctx,
567 struct r600_surface *surf);
568 void evergreen_init_color_surface_rat(struct r600_context *rctx,
569 struct r600_surface *surf);
570 void evergreen_update_db_shader_control(struct r600_context * rctx);
571
572 /* r600_blit.c */
573 void r600_copy_buffer(struct pipe_context *ctx, struct
574 pipe_resource *dst, unsigned dstx,
575 struct pipe_resource *src, const struct pipe_box *src_box);
576 void r600_init_blit_functions(struct r600_context *rctx);
577 void r600_blit_decompress_depth(struct pipe_context *ctx,
578 struct r600_texture *texture,
579 struct r600_texture *staging,
580 unsigned first_level, unsigned last_level,
581 unsigned first_layer, unsigned last_layer,
582 unsigned first_sample, unsigned last_sample);
583 void r600_decompress_depth_textures(struct r600_context *rctx,
584 struct r600_samplerview_state *textures);
585 void r600_decompress_color_textures(struct r600_context *rctx,
586 struct r600_samplerview_state *textures);
587
588 /* r600_buffer.c */
589 bool r600_init_resource(struct r600_screen *rscreen,
590 struct r600_resource *res,
591 unsigned size, unsigned alignment,
592 unsigned bind, unsigned usage);
593 struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
594 const struct pipe_resource *templ,
595 unsigned alignment);
596
597 /* r600_pipe.c */
598 void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
599 unsigned flags);
600
601 /* r600_query.c */
602 void r600_init_query_functions(struct r600_context *rctx);
603 void r600_suspend_nontimer_queries(struct r600_context *ctx);
604 void r600_resume_nontimer_queries(struct r600_context *ctx);
605 void r600_suspend_timer_queries(struct r600_context *ctx);
606 void r600_resume_timer_queries(struct r600_context *ctx);
607
608 /* r600_resource.c */
609 void r600_init_context_resource_functions(struct r600_context *r600);
610
611 /* r600_shader.c */
612 int r600_pipe_shader_create(struct pipe_context *ctx,
613 struct r600_pipe_shader *shader,
614 struct r600_shader_key key);
615 #ifdef HAVE_OPENCL
616 int r600_compute_shader_create(struct pipe_context * ctx,
617 LLVMModuleRef mod, struct r600_bytecode * bytecode);
618 #endif
619 void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader);
620
621 /* r600_state.c */
622 struct pipe_sampler_view *
623 r600_create_sampler_view_custom(struct pipe_context *ctx,
624 struct pipe_resource *texture,
625 const struct pipe_sampler_view *state,
626 unsigned width_first_level, unsigned height_first_level);
627 void r600_init_state_functions(struct r600_context *rctx);
628 void r600_init_atom_start_cs(struct r600_context *rctx);
629 void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
630 void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
631 void *r600_create_db_flush_dsa(struct r600_context *rctx);
632 void *r600_create_resolve_blend(struct r600_context *rctx);
633 void *r700_create_resolve_blend(struct r600_context *rctx);
634 void *r600_create_decompress_blend(struct r600_context *rctx);
635 bool r600_adjust_gprs(struct r600_context *rctx);
636 boolean r600_is_format_supported(struct pipe_screen *screen,
637 enum pipe_format format,
638 enum pipe_texture_target target,
639 unsigned sample_count,
640 unsigned usage);
641 void r600_update_db_shader_control(struct r600_context * rctx);
642
643 /* r600_texture.c */
644 void r600_init_screen_texture_functions(struct pipe_screen *screen);
645 void r600_init_surface_functions(struct r600_context *r600);
646 uint32_t r600_translate_texformat(struct pipe_screen *screen, enum pipe_format format,
647 const unsigned char *swizzle_view,
648 uint32_t *word4_p, uint32_t *yuv_format_p);
649 unsigned r600_texture_get_offset(struct r600_texture *rtex,
650 unsigned level, unsigned layer);
651 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
652 struct pipe_resource *texture,
653 const struct pipe_surface *templ,
654 unsigned width, unsigned height);
655
656 /* r600_state_common.c */
657 void r600_init_common_state_functions(struct r600_context *rctx);
658 void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom);
659 void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom);
660 void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom);
661 void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom);
662 void r600_emit_vgt2_state(struct r600_context *rctx, struct r600_atom *atom);
663 void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom);
664 void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom);
665 void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom);
666 void r600_init_atom(struct r600_context *rctx, struct r600_atom *atom, unsigned id,
667 void (*emit)(struct r600_context *ctx, struct r600_atom *state),
668 unsigned num_dw);
669 void r600_vertex_buffers_dirty(struct r600_context *rctx);
670 void r600_sampler_views_dirty(struct r600_context *rctx,
671 struct r600_samplerview_state *state);
672 void r600_sampler_states_dirty(struct r600_context *rctx,
673 struct r600_sampler_states *state);
674 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state);
675 void r600_draw_rectangle(struct blitter_context *blitter,
676 int x1, int y1, int x2, int y2, float depth,
677 enum blitter_attrib_type type, const union pipe_color_union *attrib);
678 uint32_t r600_translate_stencil_op(int s_op);
679 uint32_t r600_translate_fill(uint32_t func);
680 unsigned r600_tex_wrap(unsigned wrap);
681 unsigned r600_tex_filter(unsigned filter);
682 unsigned r600_tex_mipfilter(unsigned filter);
683 unsigned r600_tex_compare(unsigned compare);
684 bool sampler_state_needs_border_color(const struct pipe_sampler_state *state);
685
686 /*
687 * Helpers for building command buffers
688 */
689
690 #define PKT3_SET_CONFIG_REG 0x68
691 #define PKT3_SET_CONTEXT_REG 0x69
692 #define PKT3_SET_CTL_CONST 0x6F
693 #define PKT3_SET_LOOP_CONST 0x6C
694
695 #define R600_CONFIG_REG_OFFSET 0x08000
696 #define R600_CONTEXT_REG_OFFSET 0x28000
697 #define R600_CTL_CONST_OFFSET 0x3CFF0
698 #define R600_LOOP_CONST_OFFSET 0X0003E200
699 #define EG_LOOP_CONST_OFFSET 0x0003A200
700
701 #define PKT_TYPE_S(x) (((x) & 0x3) << 30)
702 #define PKT_COUNT_S(x) (((x) & 0x3FFF) << 16)
703 #define PKT3_IT_OPCODE_S(x) (((x) & 0xFF) << 8)
704 #define PKT3_PREDICATE(x) (((x) >> 0) & 0x1)
705 #define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))
706
707 #define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002
708
709 /*Evergreen Compute packet3*/
710 #define PKT3C(op, count, predicate) (PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op) | PKT_COUNT_S(count) | PKT3_PREDICATE(predicate) | RADEON_CP_PACKET3_COMPUTE_MODE)
711
712 static INLINE void r600_store_value(struct r600_command_buffer *cb, unsigned value)
713 {
714 cb->buf[cb->num_dw++] = value;
715 }
716
717 static INLINE void r600_store_config_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
718 {
719 assert(reg < R600_CONTEXT_REG_OFFSET);
720 assert(cb->num_dw+2+num <= cb->max_num_dw);
721 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
722 cb->buf[cb->num_dw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
723 }
724
725 /**
726 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
727 * shaders.
728 */
729 static INLINE void r600_store_context_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
730 {
731 assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
732 assert(cb->num_dw+2+num <= cb->max_num_dw);
733 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0) | cb->pkt_flags;
734 cb->buf[cb->num_dw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
735 }
736
737 /**
738 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
739 * shaders.
740 */
741 static INLINE void r600_store_ctl_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
742 {
743 assert(reg >= R600_CTL_CONST_OFFSET);
744 assert(cb->num_dw+2+num <= cb->max_num_dw);
745 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_CTL_CONST, num, 0) | cb->pkt_flags;
746 cb->buf[cb->num_dw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
747 }
748
749 static INLINE void r600_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
750 {
751 assert(reg >= R600_LOOP_CONST_OFFSET);
752 assert(cb->num_dw+2+num <= cb->max_num_dw);
753 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0);
754 cb->buf[cb->num_dw++] = (reg - R600_LOOP_CONST_OFFSET) >> 2;
755 }
756
757 /**
758 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
759 * shaders.
760 */
761 static INLINE void eg_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
762 {
763 assert(reg >= EG_LOOP_CONST_OFFSET);
764 assert(cb->num_dw+2+num <= cb->max_num_dw);
765 cb->buf[cb->num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0) | cb->pkt_flags;
766 cb->buf[cb->num_dw++] = (reg - EG_LOOP_CONST_OFFSET) >> 2;
767 }
768
/* Store a single config-register write (packet header + one value). */
static INLINE void r600_store_config_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_config_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
774
/* Store a single context-register write (packet header + one value). */
static INLINE void r600_store_context_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_context_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
780
/* Store a single CTL constant write (packet header + one value). */
static INLINE void r600_store_ctl_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_ctl_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
786
/* Store a single loop constant write (R6xx/R7xx offset). */
static INLINE void r600_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
792
/* Store a single loop constant write (Evergreen offset). */
static INLINE void eg_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	eg_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}
798
799 void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw);
800 void r600_release_command_buffer(struct r600_command_buffer *cb);
801
802 /*
803 * Helpers for emitting state into a command stream directly.
804 */
805
/* Register a buffer relocation with the winsys and return the value to be
 * written into packets (winsys reloc index * 4 — NOTE(review): confirm the
 * unit against the cs_add_reloc contract). */
static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
					     enum radeon_bo_usage usage)
{
	assert(usage);
	return ctx->ws->cs_add_reloc(ctx->cs, rbo->cs_buf, usage, rbo->domains) * 4;
}
812
813 static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value)
814 {
815 cs->buf[cs->cdw++] = value;
816 }
817
818 static INLINE void r600_write_array(struct radeon_winsys_cs *cs, unsigned num, unsigned *ptr)
819 {
820 assert(cs->cdw+num <= RADEON_MAX_CMDBUF_DWORDS);
821 memcpy(&cs->buf[cs->cdw], ptr, num * sizeof(ptr[0]));
822 cs->cdw += num;
823 }
824
825 static INLINE void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
826 {
827 assert(reg < R600_CONTEXT_REG_OFFSET);
828 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
829 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
830 cs->buf[cs->cdw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
831 }
832
833 static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
834 {
835 assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
836 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
837 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0);
838 cs->buf[cs->cdw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
839 }
840
/* Same as r600_write_context_reg_seq, but flags the packet as compute. */
static INLINE void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	r600_write_context_reg_seq(cs, reg, num);
	/* Set the compute bit on the packet header */
	cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
}
847
848 static INLINE void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
849 {
850 assert(reg >= R600_CTL_CONST_OFFSET);
851 assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
852 cs->buf[cs->cdw++] = PKT3(PKT3_SET_CTL_CONST, num, 0);
853 cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
854 }
855
/* Write a single config register (packet header + one value) into the CS. */
static INLINE void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_config_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}
861
/* Write a single context register (packet header + one value) into the CS. */
static INLINE void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_context_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}
867
/* Write a single context register with the compute flag set. */
static INLINE void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_compute_context_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}
873
/* Write a single CTL constant (packet header + one value) into the CS. */
static INLINE void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_ctl_const_seq(cs, reg, 1);
	r600_write_value(cs, value);
}
879
880 /*
881 * common helpers
882 */
/*
 * Convert a float to unsigned fixed point with 'frac_bits' fractional bits.
 * The shifted constant is unsigned so that frac_bits == 31 does not hit
 * signed-overflow UB (1 << 31 on a 32-bit int); frac_bits must be < 32.
 */
static inline uint32_t S_FIXED(float value, uint32_t frac_bits)
{
	return value * (1u << frac_bits);
}
887 #define ALIGN_DIVUP(x, y) (((x) + (y) - 1) / (y))
888
/* Map a max-anisotropy value (1,2,4,8,16x) to the 0-4 hardware encoding. */
static inline unsigned r600_tex_aniso_filter(unsigned filter)
{
	unsigned level = 0;
	unsigned threshold = 1;

	/* thresholds 1, 2, 4, 8 encode as 0-3; anything above encodes as 4 */
	while (level < 4 && filter > threshold) {
		threshold *= 2;
		level++;
	}
	return level;
}
897
/* Pack a float into 12.4 fixed point, clamped to the representable range. */
static inline unsigned r600_pack_float_12p4(float x)
{
	if (x <= 0)
		return 0;
	if (x >= 4096)
		return 0xffff;
	return x * 16;
}
904
905 static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, struct pipe_resource *resource)
906 {
907 struct r600_screen *rscreen = (struct r600_screen*)screen;
908 struct r600_resource *rresource = (struct r600_resource*)resource;
909
910 return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
911 }
912
913 #endif