src/gallium/drivers/freedreno/freedreno_util.h
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_UTIL_H_
#define FREEDRENO_UTIL_H_

#include <freedreno_drmif.h>
#include <freedreno_ringbuffer.h>

#include "pipe/p_format.h"
#include "pipe/p_state.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_half.h"
#include "util/u_dynarray.h"
#include "util/u_pack_color.h"

#include "disasm.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

enum adreno_rb_depth_format fd_pipe2depth(enum pipe_format format);
enum pc_di_index_size fd_pipe2index(enum pipe_format format);
enum pipe_format fd_gmem_restore_format(enum pipe_format format);
enum adreno_rb_blend_factor fd_blend_factor(unsigned factor);
enum adreno_pa_su_sc_draw fd_polygon_mode(unsigned mode);
enum adreno_stencil_op fd_stencil_op(unsigned op);

#define A3XX_MAX_MIP_LEVELS 14
/* TBD if it is the same on a2xx, but for now: */
#define MAX_MIP_LEVELS A3XX_MAX_MIP_LEVELS

#define A2XX_MAX_RENDER_TARGETS 1
#define A3XX_MAX_RENDER_TARGETS 4
#define A4XX_MAX_RENDER_TARGETS 8
#define A5XX_MAX_RENDER_TARGETS 8
#define A6XX_MAX_RENDER_TARGETS 8

#define MAX_RENDER_TARGETS A6XX_MAX_RENDER_TARGETS

#define FD_DBG_MSGS      0x0001
#define FD_DBG_DISASM    0x0002
#define FD_DBG_DCLEAR    0x0004
#define FD_DBG_DDRAW     0x0008
#define FD_DBG_NOSCIS    0x0010
#define FD_DBG_DIRECT    0x0020
#define FD_DBG_NOBYPASS  0x0040
#define FD_DBG_FRAGHALF  0x0080
#define FD_DBG_NOBIN     0x0100
#define FD_DBG_OPTMSGS   0x0200
#define FD_DBG_GLSL120   0x0400
#define FD_DBG_SHADERDB  0x0800
#define FD_DBG_FLUSH     0x1000
#define FD_DBG_DEQP      0x2000
#define FD_DBG_INORDER   0x4000
#define FD_DBG_BSTAT     0x8000
#define FD_DBG_NOGROW    0x10000
#define FD_DBG_LRZ       0x20000
#define FD_DBG_NOINDR    0x40000
#define FD_DBG_NOBLIT    0x80000
#define FD_DBG_HIPRIO    0x100000
#define FD_DBG_TTILE     0x200000
#define FD_DBG_PERFC     0x400000

extern int fd_mesa_debug;
extern bool fd_binning_enabled;
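
/* Illustrative note (not part of the original header): fd_mesa_debug is
 * expected to be initialized elsewhere in the driver from the FD_MESA_DEBUG
 * environment variable, after which individual features simply test the
 * bits above, e.g. something like:
 *
 *	if (fd_mesa_debug & FD_DBG_DISASM)
 *		dump_shader_disasm(shader);    // hypothetical helper, for illustration
 */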

#define DBG(fmt, ...) \
		do { if (fd_mesa_debug & FD_DBG_MSGS) \
			debug_printf("%s:%d: "fmt "\n", \
				__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
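
/* Example (illustrative, not part of the original header): COND() is handy
 * when packing register bitfields, roughly along the lines of:
 *
 *	val = A3XX_RB_DEPTH_CONTROL_ZFUNC(func) |
 *		COND(z_enable, A3XX_RB_DEPTH_CONTROL_Z_ENABLE);
 *
 * (the register/bitfield names here are only for illustration)
 */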

#define CP_REG(reg) ((0x4 << 16) | ((unsigned int)((reg) - (0x2000))))

static inline uint32_t DRAW(enum pc_di_primtype prim_type,
		enum pc_di_src_sel source_select, enum pc_di_index_size index_size,
		enum pc_di_vis_cull_mode vis_cull_mode,
		uint8_t instances)
{
	return (prim_type << 0) |
			(source_select << 6) |
			((index_size & 1) << 11) |
			((index_size >> 1) << 13) |
			(vis_cull_mode << 9) |
			(1 << 14) |
			(instances << 24);
}
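
/* Example (illustrative, not from the original header): the DRAW() dword is
 * typically the second dword of a CP_DRAW_INDX packet on pre-a5xx parts,
 * roughly like this for a non-indexed draw:
 *
 *	OUT_PKT3(ring, CP_DRAW_INDX, 3);
 *	OUT_RING(ring, 0x00000000);
 *	OUT_RING(ring, DRAW(DI_PT_TRILIST, DI_SRC_SEL_AUTO_INDEX,
 *			INDEX_SIZE_IGN, IGNORE_VISIBILITY, 0));
 *	OUT_RING(ring, vertex_count);
 */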

static inline uint32_t DRAW_A20X(enum pc_di_primtype prim_type,
		enum pc_di_src_sel source_select, enum pc_di_index_size index_size,
		enum pc_di_vis_cull_mode vis_cull_mode,
		uint16_t count)
{
	return (prim_type << 0) |
			(source_select << 6) |
			((index_size & 1) << 11) |
			((index_size >> 1) << 13) |
			(vis_cull_mode << 9) |
			(count << 16);
}

/* for tracking cmdstream positions that need to be patched: */
struct fd_cs_patch {
	uint32_t *cs;
	uint32_t val;
};
#define fd_patch_num_elements(buf) ((buf)->size / sizeof(struct fd_cs_patch))
#define fd_patch_element(buf, i) util_dynarray_element(buf, struct fd_cs_patch, i)

static inline enum pipe_format
pipe_surface_format(struct pipe_surface *psurf)
{
	if (!psurf)
		return PIPE_FORMAT_NONE;
	return psurf->format;
}

static inline bool
fd_surface_half_precision(const struct pipe_surface *psurf)
{
	enum pipe_format format;

	if (!psurf)
		return true;

	format = psurf->format;

	/* colors are provided in consts, which go through cov.f32f16, which will
	 * break these values
	 */
	if (util_format_is_pure_integer(format))
		return false;

	/* avoid losing precision on 32-bit float formats */
	if (util_format_is_float(format) &&
			util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0) == 32)
		return false;

	return true;
}

static inline unsigned
fd_sampler_first_level(const struct pipe_sampler_view *view)
{
	if (view->target == PIPE_BUFFER)
		return 0;
	return view->u.tex.first_level;
}

static inline unsigned
fd_sampler_last_level(const struct pipe_sampler_view *view)
{
	if (view->target == PIPE_BUFFER)
		return 0;
	return view->u.tex.last_level;
}

static inline bool
fd_half_precision(struct pipe_framebuffer_state *pfb)
{
	unsigned i;

	for (i = 0; i < pfb->nr_cbufs; i++)
		if (!fd_surface_half_precision(pfb->cbufs[i]))
			return false;

	return true;
}

#define LOG_DWORDS 0

static inline void emit_marker(struct fd_ringbuffer *ring, int scratch_idx);

static inline void
OUT_RING(struct fd_ringbuffer *ring, uint32_t data)
{
	if (LOG_DWORDS) {
		DBG("ring[%p]: OUT_RING %04x: %08x", ring,
				(uint32_t)(ring->cur - ring->last_start), data);
	}
	fd_ringbuffer_emit(ring, data);
}

/* like OUT_RING() but appends a cmdstream patch point to 'buf' */
static inline void
OUT_RINGP(struct fd_ringbuffer *ring, uint32_t data,
		struct util_dynarray *buf)
{
	if (LOG_DWORDS) {
		DBG("ring[%p]: OUT_RINGP %04x: %08x", ring,
				(uint32_t)(ring->cur - ring->last_start), data);
	}
	util_dynarray_append(buf, struct fd_cs_patch, ((struct fd_cs_patch){
		.cs = ring->cur++,
		.val = data,
	}));
}
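
/* Illustrative sketch (not from the original header): patch points recorded
 * via OUT_RINGP() are typically resolved later, once the final value is
 * known, by walking the dynarray and poking the saved cmdstream location:
 *
 *	for (unsigned i = 0; i < fd_patch_num_elements(buf); i++) {
 *		struct fd_cs_patch *patch = fd_patch_element(buf, i);
 *		*patch->cs = patch->val | late_resolved_bits;
 *	}
 *
 * ('late_resolved_bits' is just a placeholder for whatever only becomes
 * known after the cmdstream was built, e.g. at flush time)
 */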

/*
 * NOTE: OUT_RELOC*() is 2 dwords (64b) on a5xx+
 */

static inline void
OUT_RELOC(struct fd_ringbuffer *ring, struct fd_bo *bo,
		uint32_t offset, uint64_t or, int32_t shift)
{
	if (LOG_DWORDS) {
		DBG("ring[%p]: OUT_RELOC %04x: %p+%u << %d", ring,
				(uint32_t)(ring->cur - ring->last_start), bo, offset, shift);
	}
	debug_assert(offset < fd_bo_size(bo));
	fd_ringbuffer_reloc2(ring, &(struct fd_reloc){
		.bo = bo,
		.flags = FD_RELOC_READ,
		.offset = offset,
		.or = or,
		.shift = shift,
		.orhi = or >> 32,
	});
}

static inline void
OUT_RELOCW(struct fd_ringbuffer *ring, struct fd_bo *bo,
		uint32_t offset, uint64_t or, int32_t shift)
{
	if (LOG_DWORDS) {
		DBG("ring[%p]: OUT_RELOCW %04x: %p+%u << %d", ring,
				(uint32_t)(ring->cur - ring->last_start), bo, offset, shift);
	}
	debug_assert(offset < fd_bo_size(bo));
	fd_ringbuffer_reloc2(ring, &(struct fd_reloc){
		.bo = bo,
		.flags = FD_RELOC_READ | FD_RELOC_WRITE,
		.offset = offset,
		.or = or,
		.shift = shift,
		.orhi = or >> 32,
	});
}
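
/* Example (illustrative, not from the original header): OUT_RELOCW() is used
 * where the GPU will write into the buffer, e.g. emitting the address of a
 * result buffer (packet shown only as a sketch; 'result_bo' is a
 * hypothetical fd_bo):
 *
 *	OUT_PKT3(ring, CP_MEM_WRITE, 2);
 *	OUT_RELOCW(ring, result_bo, 0, 0, 0);   // address = result_bo + offset 0
 *	OUT_RING(ring, 0x12345678);             // value for the GPU to write
 */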

static inline void
OUT_RB(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	fd_ringbuffer_emit_reloc_ring_full(ring, target, 0);
}

static inline void BEGIN_RING(struct fd_ringbuffer *ring, uint32_t ndwords)
{
	if (ring->cur + ndwords >= ring->end)
		fd_ringbuffer_grow(ring, ndwords);
}

static inline uint32_t
__gpu_id(struct fd_ringbuffer *ring)
{
	uint64_t val;
	fd_pipe_get_param(ring->pipe, FD_GPU_ID, &val);
	return val;
}

static inline void
OUT_PKT0(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	debug_assert(__gpu_id(ring) < 500);
	BEGIN_RING(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
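
/* Example (illustrative, not from the original header): a type-0 packet is a
 * write of 'cnt' consecutive registers starting at 'regindx', so a typical
 * pre-a5xx register write looks roughly like (register name only for
 * illustration):
 *
 *	OUT_PKT0(ring, REG_A3XX_GRAS_SU_POINT_SIZE, 1);
 *	OUT_RING(ring, A3XX_GRAS_SU_POINT_SIZE(1.0));
 */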

static inline void
OUT_PKT2(struct fd_ringbuffer *ring)
{
	debug_assert(__gpu_id(ring) < 500);
	BEGIN_RING(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	debug_assert(__gpu_id(ring) < 500);
	BEGIN_RING(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

/*
 * Starting with a5xx, pkt4/pkt7 are used instead of pkt0/pkt3
 */

static inline unsigned
_odd_parity_bit(unsigned val)
{
	/* See: http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
	 * note that we want odd parity so 0x6996 is inverted.
	 */
	val ^= val >> 16;
	val ^= val >> 8;
	val ^= val >> 4;
	val &= 0xf;
	return (~0x6996 >> val) & 1;
}
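
/* Note (added for clarity, not from the original header): the xor-folds
 * reduce 'val' to a 4-bit value with the same parity, and 0x6996 is the
 * 16-entry lookup table of each nibble's parity; inverting it yields the
 * bit needed to make the overall parity odd, which is what the pkt4/pkt7
 * headers below expect.
 */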

static inline void
OUT_PKT4(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	BEGIN_RING(ring, cnt+1);
	OUT_RING(ring, CP_TYPE4_PKT | cnt |
			(_odd_parity_bit(cnt) << 7) |
			((regindx & 0x3ffff) << 8) |
			((_odd_parity_bit(regindx) << 27)));
}
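
/* Example (illustrative, not from the original header): on a5xx+ register
 * writes use type-4 packets instead, roughly (register name only for
 * illustration):
 *
 *	OUT_PKT4(ring, REG_A5XX_GRAS_CL_CNTL, 1);
 *	OUT_RING(ring, 0x00000000);
 */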

static inline void
OUT_PKT7(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	BEGIN_RING(ring, cnt+1);
	OUT_RING(ring, CP_TYPE7_PKT | cnt |
			(_odd_parity_bit(cnt) << 15) |
			((opcode & 0x7f) << 16) |
			((_odd_parity_bit(opcode) << 23)));
}

static inline void
OUT_WFI(struct fd_ringbuffer *ring)
{
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);
}

static inline void
OUT_WFI5(struct fd_ringbuffer *ring)
{
	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
}

static inline void
__OUT_IB(struct fd_ringbuffer *ring, bool prefetch, struct fd_ringbuffer *target)
{
	if (target->cur == target->start)
		return;

	unsigned count = fd_ringbuffer_cmd_count(target);

	debug_assert(__gpu_id(ring) < 500);

	/* for debug after a lockup, write a unique counter value
	 * to scratch6 for each IB, to make it easier to match up
	 * register dumps to cmdstream.  The combination of IB and
	 * DRAW (scratch7) is enough to "triangulate" the particular
	 * draw that caused the lockup.
	 */
	emit_marker(ring, 6);

	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		OUT_PKT3(ring, prefetch ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
		OUT_PKT2(ring);
	}

	emit_marker(ring, 6);
}

static inline void
__OUT_IB5(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	if (target->cur == target->start)
		return;

	unsigned count = fd_ringbuffer_cmd_count(target);

	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
	}
}

/* CP_SCRATCH_REG4 is used to hold the base address for query results: */
// XXX annoyingly the scratch regs move on a5xx, and the packet types
// differ as well, so freedreno_query_hw is going to need a bit of
// rework..
#define HW_QUERY_BASE_REG REG_AXXX_CP_SCRATCH_REG4

static inline void
emit_marker(struct fd_ringbuffer *ring, int scratch_idx)
{
	extern unsigned marker_cnt;
	unsigned reg = REG_AXXX_CP_SCRATCH_REG0 + scratch_idx;
	assert(reg != HW_QUERY_BASE_REG);
	if (reg == HW_QUERY_BASE_REG)
		return;
	OUT_PKT0(ring, reg, 1);
	OUT_RING(ring, ++marker_cnt);
}

/* helper to get a numeric value from an environment variable.. mostly
 * just leaving this here because it is helpful for brute-forcing
 * unknown formats, etc, which the blob driver does not support:
 */
static inline uint32_t env2u(const char *envvar)
{
	char *str = getenv(envvar);
	if (str)
		return strtoul(str, NULL, 0);
	return 0;
}

static inline uint32_t
pack_rgba(enum pipe_format format, const float *rgba)
{
	union util_color uc;
	util_pack_color(rgba, format, &uc);
	return uc.ui[0];
}

/*
 * swap - swap the values of @a and @b
 */
#define swap(a, b) \
	do { __typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

#define foreach_bit(b, mask) \
	for (uint32_t _m = (mask); _m && ({(b) = u_bit_scan(&_m); 1;});)
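
/* Example (illustrative, not from the original header): iterating the set
 * bits of a dirty-state style bitmask:
 *
 *	unsigned i;
 *	foreach_bit(i, enabled_mask)
 *		update_state(i);      // hypothetical per-bit handler
 */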

#define BIT(bit) (1u << bit)

/*
 * a3xx+ helpers:
 */

static inline enum a3xx_msaa_samples
fd_msaa_samples(unsigned samples)
{
	switch (samples) {
	default:
		debug_assert(0);
	case 1: return MSAA_ONE;
	case 2: return MSAA_TWO;
	case 4: return MSAA_FOUR;
	}
}

/*
 * a4xx+ helpers:
 */

static inline enum a4xx_state_block
fd4_stage2shadersb(enum shader_t type)
{
	switch (type) {
	case SHADER_VERTEX:
		return SB4_VS_SHADER;
	case SHADER_FRAGMENT:
		return SB4_FS_SHADER;
	case SHADER_COMPUTE:
		return SB4_CS_SHADER;
	default:
		unreachable("bad shader type");
		return ~0;
	}
}

#endif /* FREEDRENO_UTIL_H_ */