freedreno: Make the slice pitch be bytes, not pixels.
[mesa.git] src/gallium/drivers/freedreno/a2xx/fd2_gmem.c
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"

#include "freedreno_draw.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd2_gmem.h"
#include "fd2_context.h"
#include "fd2_emit.h"
#include "fd2_program.h"
#include "fd2_util.h"
#include "fd2_zsa.h"
#include "fd2_draw.h"
#include "instr-a2xx.h"

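/* returns the A2XX_RB_COLOR_INFO_SWAP value: 1 for the BGR-ordered formats
 * listed below, 0 for everything else
 */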
static uint32_t fmt2swap(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_B8G8R8A8_UNORM:
	case PIPE_FORMAT_B8G8R8X8_UNORM:
	case PIPE_FORMAT_B5G6R5_UNORM:
	case PIPE_FORMAT_B5G5R5A1_UNORM:
	case PIPE_FORMAT_B5G5R5X1_UNORM:
	case PIPE_FORMAT_B4G4R4A4_UNORM:
	case PIPE_FORMAT_B4G4R4X4_UNORM:
	case PIPE_FORMAT_B2G3R3_UNORM:
		return 1;
	default:
		return 0;
	}
}

static bool
use_hw_binning(struct fd_batch *batch)
{
	const struct fd_gmem_stateobj *gmem = batch->gmem_state;

	/* we hardcoded a limit of 8 "pipes"; the limit could be raised at the
	 * cost of a slightly larger command stream, but very few cases need
	 * more than 8.
	 * gmem->num_vsc_pipes == 0 means an empty batch (TODO: does that still happen?)
	 */
	if (gmem->num_vsc_pipes > 8 || !gmem->num_vsc_pipes)
		return false;

	/* only a20x hw binning is implemented
	 * a22x is more like a3xx, but perhaps the a20x path works there too? (TODO)
	 */
	if (!is_a20x(batch->ctx->screen))
		return false;

	return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) > 2);
}

/* transfer from gmem to system memory (i.e. normal RAM) */

static void
emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
		struct pipe_surface *psurf)
{
	struct fd_ringbuffer *ring = batch->tile_fini;
	struct fd_resource *rsc = fd_resource(psurf->texture);
	struct fdl_slice *slice = fd_resource_slice(rsc, psurf->u.tex.level);
	uint32_t offset =
		fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
	enum pipe_format format = fd_gmem_restore_format(psurf->format);
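	/* slice->pitch is in bytes now (see the commit subject); the copy-dest
	 * registers below appear to take pixels, hence the cpp shift, and
	 * DEST_PITCH appears to be in units of 32 pixels (hence the >> 5 and
	 * the alignment assert).
	 */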
	uint32_t pitch = slice->pitch >> fdl_cpp_shift(&rsc->layout);

	assert((pitch & 31) == 0);
	assert((offset & 0xfff) == 0);

	if (!rsc->valid)
		return;

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_INFO));
	OUT_RING(ring, A2XX_RB_COLOR_INFO_BASE(base) |
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format)));

	OUT_PKT3(ring, CP_SET_CONSTANT, 5);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COPY_CONTROL));
	OUT_RING(ring, 0x00000000);              /* RB_COPY_CONTROL */
	OUT_RELOCW(ring, rsc->bo, offset, 0, 0); /* RB_COPY_DEST_BASE */
	OUT_RING(ring, pitch >> 5);              /* RB_COPY_DEST_PITCH */
	OUT_RING(ring,                           /* RB_COPY_DEST_INFO */
			A2XX_RB_COPY_DEST_INFO_FORMAT(fd2_pipe2color(format)) |
			COND(!rsc->layout.tile_mode, A2XX_RB_COPY_DEST_INFO_LINEAR) |
			A2XX_RB_COPY_DEST_INFO_WRITE_RED |
			A2XX_RB_COPY_DEST_INFO_WRITE_GREEN |
			A2XX_RB_COPY_DEST_INFO_WRITE_BLUE |
			A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA);

	if (!is_a20x(batch->ctx->screen)) {
		OUT_WFI (ring);

		OUT_PKT3(ring, CP_SET_CONSTANT, 3);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_MAX_VTX_INDX));
		OUT_RING(ring, 3); /* VGT_MAX_VTX_INDX */
		OUT_RING(ring, 0); /* VGT_MIN_VTX_INDX */
	}

	fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
			DI_SRC_SEL_AUTO_INDEX, 3, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}

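/* builds the per-batch resolve (gmem2mem) IB once; fd2_emit_tile_gmem2mem()
 * then just replays it for each tile.  It draws a RECTLIST with the solid
 * program while RB_MODECONTROL is switched to EDRAM_COPY.
 */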
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd2_context *fd2_ctx = fd2_context(ctx);
	const struct fd_gmem_stateobj *gmem = batch->gmem_state;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct fd_ringbuffer *ring;

	batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
			FD_RINGBUFFER_STREAMING);
	ring = batch->tile_fini;

	fd2_emit_vertex_bufs(ring, 0x9c, (struct fd2_vertex_buf[]) {
			{ .prsc = fd2_ctx->solid_vertexbuf, .size = 36 },
		}, 1);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_OFFSET));
	OUT_RING(ring, 0x00000000); /* PA_SC_WINDOW_OFFSET */

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
	OUT_RING(ring, 0);

	if (!is_a20x(ctx->screen)) {
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL));
		OUT_RING(ring, 0x0000028f);
	}

	fd2_program_emit(ctx, ring, &ctx->solid_prog);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_AA_MASK));
	OUT_RING(ring, 0x0000ffff);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_DEPTHCONTROL));
	OUT_RING(ring, A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SU_SC_MODE_CNTL));
	OUT_RING(ring, A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST | /* PA_SU_SC_MODE_CNTL */
			A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(PC_DRAW_TRIANGLES) |
			A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(PC_DRAW_TRIANGLES));

	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_SCISSOR_TL));
	OUT_RING(ring, xy2d(0, 0));                    /* PA_SC_WINDOW_SCISSOR_TL */
	OUT_RING(ring, xy2d(pfb->width, pfb->height)); /* PA_SC_WINDOW_SCISSOR_BR */

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_CLIP_CNTL));
	OUT_RING(ring, 0x00000000);

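	/* viewport scale/offset of bin_w/2, bin_h/2 presumably maps the [-1,1]
	 * clip-space rect from the solid vertexbuf onto the full bin
	 */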
	OUT_PKT3(ring, CP_SET_CONSTANT, 5);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VPORT_XSCALE));
	OUT_RING(ring, fui((float) gmem->bin_w / 2.0)); /* XSCALE */
	OUT_RING(ring, fui((float) gmem->bin_w / 2.0)); /* XOFFSET */
	OUT_RING(ring, fui((float) gmem->bin_h / 2.0)); /* YSCALE */
	OUT_RING(ring, fui((float) gmem->bin_h / 2.0)); /* YOFFSET */

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_MODECONTROL));
	OUT_RING(ring, A2XX_RB_MODECONTROL_EDRAM_MODE(EDRAM_COPY));

	if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
		emit_gmem2mem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf);

	if (batch->resolve & FD_BUFFER_COLOR)
		emit_gmem2mem_surf(batch, gmem->cbuf_base[0], pfb->cbufs[0]);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_MODECONTROL));
	OUT_RING(ring, A2XX_RB_MODECONTROL_EDRAM_MODE(COLOR_DEPTH));

	if (!is_a20x(ctx->screen)) {
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL));
		OUT_RING(ring, 0x0000003b);
	}
}

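/* per-tile resolve: just replay the pre-built tile_fini IB */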
static void
fd2_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
	fd2_emit_ib(batch->gmem, batch->tile_fini);
}

/* transfer from system memory to gmem */

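/* restore one surface from system memory into GMEM: bind it as a 2D texture
 * and draw a RECTLIST covering the bin (the caller has already set up the
 * blit program and texture coordinates)
 */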
static void
emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
		struct pipe_surface *psurf)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct fd_resource *rsc = fd_resource(psurf->texture);
	struct fdl_slice *slice = fd_resource_slice(rsc, psurf->u.tex.level);
	uint32_t offset =
		fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
	enum pipe_format format = fd_gmem_restore_format(psurf->format);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_INFO));
	OUT_RING(ring, A2XX_RB_COLOR_INFO_BASE(base) |
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format)));

	/* emit fb as a texture: */
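	/* slice->pitch is in bytes; SQ_TEX_0_PITCH presumably wants pixels,
	 * hence the cpp shift below
	 */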
	OUT_PKT3(ring, CP_SET_CONSTANT, 7);
	OUT_RING(ring, 0x00010000);
	OUT_RING(ring, A2XX_SQ_TEX_0_CLAMP_X(SQ_TEX_WRAP) |
			A2XX_SQ_TEX_0_CLAMP_Y(SQ_TEX_WRAP) |
			A2XX_SQ_TEX_0_CLAMP_Z(SQ_TEX_WRAP) |
			A2XX_SQ_TEX_0_PITCH(slice->pitch >> fdl_cpp_shift(&rsc->layout)));
	OUT_RELOC(ring, rsc->bo, offset,
			A2XX_SQ_TEX_1_FORMAT(fd2_pipe2surface(format).format) |
			A2XX_SQ_TEX_1_CLAMP_POLICY(SQ_TEX_CLAMP_POLICY_OGL), 0);
	OUT_RING(ring, A2XX_SQ_TEX_2_WIDTH(psurf->width - 1) |
			A2XX_SQ_TEX_2_HEIGHT(psurf->height - 1));
	OUT_RING(ring, A2XX_SQ_TEX_3_MIP_FILTER(SQ_TEX_FILTER_BASEMAP) |
			A2XX_SQ_TEX_3_SWIZ_X(0) |
			A2XX_SQ_TEX_3_SWIZ_Y(1) |
			A2XX_SQ_TEX_3_SWIZ_Z(2) |
			A2XX_SQ_TEX_3_SWIZ_W(3) |
			A2XX_SQ_TEX_3_XY_MAG_FILTER(SQ_TEX_FILTER_POINT) |
			A2XX_SQ_TEX_3_XY_MIN_FILTER(SQ_TEX_FILTER_POINT));
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, A2XX_SQ_TEX_5_DIMENSION(SQ_TEX_DIMENSION_2D));

	if (!is_a20x(batch->ctx->screen)) {
		OUT_PKT3(ring, CP_SET_CONSTANT, 3);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_MAX_VTX_INDX));
		OUT_RING(ring, 3); /* VGT_MAX_VTX_INDX */
		OUT_RING(ring, 0); /* VGT_MIN_VTX_INDX */
	}

	fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
			DI_SRC_SEL_AUTO_INDEX, 3, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}

static void
fd2_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd2_context *fd2_ctx = fd2_context(ctx);
	const struct fd_gmem_stateobj *gmem = batch->gmem_state;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned bin_w = tile->bin_w;
	unsigned bin_h = tile->bin_h;
	float x0, y0, x1, y1;

	fd2_emit_vertex_bufs(ring, 0x9c, (struct fd2_vertex_buf[]) {
			{ .prsc = fd2_ctx->solid_vertexbuf, .size = 36 },
			{ .prsc = fd2_ctx->solid_vertexbuf, .size = 24, .offset = 36 },
		}, 2);

	/* write texture coordinates to vertexbuf: */
	x0 = ((float)tile->xoff) / ((float)pfb->width);
	x1 = ((float)tile->xoff + bin_w) / ((float)pfb->width);
	y0 = ((float)tile->yoff) / ((float)pfb->height);
	y1 = ((float)tile->yoff + bin_h) / ((float)pfb->height);
	OUT_PKT3(ring, CP_MEM_WRITE, 7);
	OUT_RELOC(ring, fd_resource(fd2_ctx->solid_vertexbuf)->bo, 36, 0, 0);
	OUT_RING(ring, fui(x0));
	OUT_RING(ring, fui(y0));
	OUT_RING(ring, fui(x1));
	OUT_RING(ring, fui(y0));
	OUT_RING(ring, fui(x0));
	OUT_RING(ring, fui(y1));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
	OUT_RING(ring, 0);

	fd2_program_emit(ctx, ring, &ctx->blit_prog[0]);

	OUT_PKT0(ring, REG_A2XX_TC_CNTL_STATUS, 1);
	OUT_RING(ring, A2XX_TC_CNTL_STATUS_L2_INVALIDATE);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_DEPTHCONTROL));
	OUT_RING(ring, A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SU_SC_MODE_CNTL));
	OUT_RING(ring, A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST |
			A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(PC_DRAW_TRIANGLES) |
			A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(PC_DRAW_TRIANGLES));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_AA_MASK));
	OUT_RING(ring, 0x0000ffff);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLORCONTROL));
	OUT_RING(ring, A2XX_RB_COLORCONTROL_ALPHA_FUNC(FUNC_ALWAYS) |
			A2XX_RB_COLORCONTROL_BLEND_DISABLE |
			A2XX_RB_COLORCONTROL_ROP_CODE(12) |
			A2XX_RB_COLORCONTROL_DITHER_MODE(DITHER_DISABLE) |
			A2XX_RB_COLORCONTROL_DITHER_TYPE(DITHER_PIXEL));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_BLEND_CONTROL));
	OUT_RING(ring, A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(FACTOR_ONE) |
			A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(BLEND2_DST_PLUS_SRC) |
			A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(FACTOR_ZERO) |
			A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(FACTOR_ONE) |
			A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(BLEND2_DST_PLUS_SRC) |
			A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(FACTOR_ZERO));

	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_SCISSOR_TL));
	OUT_RING(ring, A2XX_PA_SC_WINDOW_OFFSET_DISABLE |
			xy2d(0,0));                  /* PA_SC_WINDOW_SCISSOR_TL */
	OUT_RING(ring, xy2d(bin_w, bin_h));  /* PA_SC_WINDOW_SCISSOR_BR */

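	/* negative YSCALE presumably flips Y so the restored framebuffer
	 * texture lands right side up in the bin
	 */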
	OUT_PKT3(ring, CP_SET_CONSTANT, 5);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VPORT_XSCALE));
	OUT_RING(ring, fui((float)bin_w/2.0));  /* PA_CL_VPORT_XSCALE */
	OUT_RING(ring, fui((float)bin_w/2.0));  /* PA_CL_VPORT_XOFFSET */
	OUT_RING(ring, fui(-(float)bin_h/2.0)); /* PA_CL_VPORT_YSCALE */
	OUT_RING(ring, fui((float)bin_h/2.0));  /* PA_CL_VPORT_YOFFSET */

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VTE_CNTL));
	OUT_RING(ring, A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT |
			A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT | // XXX check this???
			A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_CLIP_CNTL));
	OUT_RING(ring, 0x00000000);

	if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
		emit_mem2gmem_surf(batch, gmem->zsbuf_base[0], pfb->zsbuf);

	if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR))
		emit_mem2gmem_surf(batch, gmem->cbuf_base[0], pfb->cbufs[0]);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_VTE_CNTL));
	OUT_RING(ring, A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT |
			A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA |
			A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA);

	/* TODO blob driver seems to toss in a CACHE_FLUSH after each DRAW_INDX.. */
}

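/* draw packets are recorded before we know whether hw binning will be used;
 * patch_draws() fills in the final visibility cull mode (or, on a20x,
 * rewrites CP_DRAW_INDX_BIN into a plain CP_DRAW_INDX when visibility is
 * not used)
 */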
static void
patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
	unsigned i;

	if (!is_a20x(batch->ctx->screen)) {
		/* identical to a3xx */
		for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
			struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
			*patch->cs = patch->val | DRAW(0, 0, 0, vismode, 0);
		}
		util_dynarray_clear(&batch->draw_patches);
		return;
	}

	if (vismode == USE_VISIBILITY)
		return;

	for (i = 0; i < batch->draw_patches.size / sizeof(uint32_t*); i++) {
		uint32_t *ptr = *util_dynarray_element(&batch->draw_patches, uint32_t*, i);
		unsigned cnt = ptr[0] >> 16 & 0xfff; /* 5 with idx buffer, 3 without */

		/* convert CP_DRAW_INDX_BIN to a CP_DRAW_INDX:
		 * replace the first two DWORDS with a NOP and move the rest down
		 * (we don't want to have to move the idx buffer reloc)
		 */
		ptr[0] = CP_TYPE3_PKT | (CP_NOP << 8);
		ptr[1] = 0x00000000;

		ptr[4] = ptr[2] & ~(1 << 14 | 1 << 15); /* remove cull_enable bits */
		ptr[2] = CP_TYPE3_PKT | ((cnt-2) << 16) | (CP_DRAW_INDX << 8);
		ptr[3] = 0x00000000;
	}
}

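/* sysmem (GMEM bypass) rendering: point RB_COLOR_INFO directly at the color
 * buffer in system memory and scissor to the full framebuffer, so no
 * tiling/resolve passes are needed
 */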
static void
fd2_emit_sysmem_prep(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_surface *psurf = pfb->cbufs[0];

	if (!psurf)
		return;

	struct fd_resource *rsc = fd_resource(psurf->texture);
	struct fdl_slice *slice = fd_resource_slice(rsc, psurf->u.tex.level);
	uint32_t offset =
		fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
	uint32_t pitch = slice->pitch >> fdl_cpp_shift(&rsc->layout);

	assert((pitch & 31) == 0);
	assert((offset & 0xfff) == 0);

	fd2_emit_restore(ctx, ring);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_SURFACE_INFO));
	OUT_RING(ring, A2XX_RB_SURFACE_INFO_SURFACE_PITCH(pitch));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_INFO));
	OUT_RELOCW(ring, rsc->bo, offset,
			COND(!rsc->layout.tile_mode, A2XX_RB_COLOR_INFO_LINEAR) |
			A2XX_RB_COLOR_INFO_SWAP(fmt2swap(psurf->format)) |
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(psurf->format)), 0);

	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_SCREEN_SCISSOR_TL));
	OUT_RING(ring, A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE);
	OUT_RING(ring, A2XX_PA_SC_SCREEN_SCISSOR_BR_X(pfb->width) |
			A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(pfb->height));

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_OFFSET));
	OUT_RING(ring, A2XX_PA_SC_WINDOW_OFFSET_X(0) |
			A2XX_PA_SC_WINDOW_OFFSET_Y(0));

	patch_draws(batch, IGNORE_VISIBILITY);
	util_dynarray_clear(&batch->draw_patches);
	util_dynarray_clear(&batch->shader_patches);
}

/* before first tile */
static void
fd2_emit_tile_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	const struct fd_gmem_stateobj *gmem = batch->gmem_state;
	enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);
	uint32_t reg;

	fd2_emit_restore(ctx, ring);

	prepare_tile_fini_ib(batch);

	OUT_PKT3(ring, CP_SET_CONSTANT, 4);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_SURFACE_INFO));
	OUT_RING(ring, gmem->bin_w); /* RB_SURFACE_INFO */
	OUT_RING(ring, A2XX_RB_COLOR_INFO_SWAP(fmt2swap(format)) |
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format)));
	reg = A2XX_RB_DEPTH_INFO_DEPTH_BASE(gmem->zsbuf_base[0]);
	if (pfb->zsbuf)
		reg |= A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(fd_pipe2depth(pfb->zsbuf->format));
	OUT_RING(ring, reg); /* RB_DEPTH_INFO */

	/* fast clear patches */
	int depth_size = -1;
	int color_size = -1;

	if (pfb->cbufs[0])
		color_size = util_format_get_blocksizebits(format) == 32 ? 4 : 2;

	if (pfb->zsbuf)
		depth_size = fd_pipe2depth(pfb->zsbuf->format) == 1 ? 4 : 2;

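	/* walk the recorded gmem patch points: fast-clear entries get the clear
	 * area geometry and color/depth bases filled in below, while
	 * RESTORE_INFO entries get the real RB_SURFACE/COLOR/DEPTH_INFO values
	 */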
	for (int i = 0; i < fd_patch_num_elements(&batch->gmem_patches); i++) {
		struct fd_cs_patch *patch = fd_patch_element(&batch->gmem_patches, i);
		uint32_t color_base = 0, depth_base = gmem->zsbuf_base[0];
		uint32_t size, lines;

		/* note: 1 "line" is 512 bytes in both color/depth areas (1K total) */
		switch (patch->val) {
		case GMEM_PATCH_FASTCLEAR_COLOR:
			size = align(gmem->bin_w * gmem->bin_h * color_size, 0x8000);
			lines = size / 1024;
			depth_base = size / 2;
			break;
		case GMEM_PATCH_FASTCLEAR_DEPTH:
			size = align(gmem->bin_w * gmem->bin_h * depth_size, 0x8000);
			lines = size / 1024;
			color_base = depth_base;
			depth_base = depth_base + size / 2;
			break;
		case GMEM_PATCH_FASTCLEAR_COLOR_DEPTH:
			lines = align(gmem->bin_w * gmem->bin_h * color_size * 2, 0x8000) / 1024;
			break;
		case GMEM_PATCH_RESTORE_INFO:
			patch->cs[0] = gmem->bin_w;
			patch->cs[1] = A2XX_RB_COLOR_INFO_SWAP(fmt2swap(format)) |
					A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format));
			patch->cs[2] = A2XX_RB_DEPTH_INFO_DEPTH_BASE(gmem->zsbuf_base[0]);
			if (pfb->zsbuf)
				patch->cs[2] |= A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(fd_pipe2depth(pfb->zsbuf->format));
			continue;
		default:
			continue;
		}

		patch->cs[0] = A2XX_PA_SC_SCREEN_SCISSOR_BR_X(32) |
				A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(lines);
		patch->cs[4] = A2XX_RB_COLOR_INFO_BASE(color_base) |
				A2XX_RB_COLOR_INFO_FORMAT(COLORX_8_8_8_8);
		patch->cs[5] = A2XX_RB_DEPTH_INFO_DEPTH_BASE(depth_base) |
				A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(1);
	}
	util_dynarray_clear(&batch->gmem_patches);

	/* set to zero, for some reason hardware doesn't like certain values */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_CURRENT_BIN_ID_MIN));
	OUT_RING(ring, 0);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_VGT_CURRENT_BIN_ID_MAX));
	OUT_RING(ring, 0);

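	/* hw binning path: trim the binning shader's memory exports to the
	 * number of VSC pipes actually used, set up the per-pipe export
	 * buffers and bin-coordinate transform constants, then run the
	 * binning pass IB before the real tiles
	 */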
	if (use_hw_binning(batch)) {
		/* patch out unneeded memory exports by changing EXEC CF to EXEC_END
		 *
		 * in the shader compiler, we guarantee that the shader ends with
		 * a specific pattern of ALLOC/EXEC CF pairs for the hw binning exports
		 *
		 * since the patches point only to dwords and CFs are 1.5 dwords,
		 * the patch is dword-aligned and might point to an ALLOC CF
		 */
		for (int i = 0; i < batch->shader_patches.size / sizeof(void*); i++) {
			instr_cf_t *cf =
				*util_dynarray_element(&batch->shader_patches, instr_cf_t*, i);
			if (cf->opc == ALLOC)
				cf++;
			assert(cf->opc == EXEC);
			assert(cf[ctx->screen->num_vsc_pipes*2-2].opc == EXEC_END);
			cf[2*(gmem->num_vsc_pipes-1)].opc = EXEC_END;
		}

		patch_draws(batch, USE_VISIBILITY);

		/* initialize shader constants for the binning memexport */
		OUT_PKT3(ring, CP_SET_CONSTANT, 1 + gmem->num_vsc_pipes * 4);
		OUT_RING(ring, 0x0000000C);

		for (int i = 0; i < gmem->num_vsc_pipes; i++) {
			/* allocate in 64k increments to avoid reallocs */
			uint32_t bo_size = align(batch->num_vertices, 0x10000);
			if (!ctx->vsc_pipe_bo[i] || fd_bo_size(ctx->vsc_pipe_bo[i]) < bo_size) {
				if (ctx->vsc_pipe_bo[i])
					fd_bo_del(ctx->vsc_pipe_bo[i]);
				ctx->vsc_pipe_bo[i] = fd_bo_new(ctx->dev, bo_size,
						DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_pipe[%u]", i);
				assert(ctx->vsc_pipe_bo[i]);
			}

			/* memory export address (export32):
			 * .x: (base_address >> 2) | 0x40000000 (?)
			 * .y: index (float) - set by shader
			 * .z: 0x4B00D000 (?)
			 * .w: 0x4B000000 (?) | max_index (?)
			 */
			OUT_RELOCW(ring, ctx->vsc_pipe_bo[i], 0, 0x40000000, -2);
			OUT_RING(ring, 0x00000000);
			OUT_RING(ring, 0x4B00D000);
			OUT_RING(ring, 0x4B000000 | bo_size);
		}

		OUT_PKT3(ring, CP_SET_CONSTANT, 1 + gmem->num_vsc_pipes * 8);
		OUT_RING(ring, 0x0000018C);

		for (int i = 0; i < gmem->num_vsc_pipes; i++) {
			const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
			float off_x, off_y, mul_x, mul_y;

			/* consts to transform from [-1,1] to bin coordinates for this pipe
			 * for x/y, [0,256/2040] = 0, [256/2040,512/2040] = 1, etc
			 * 8 possible values on the x/y axis;
			 * to clip at the binning stage, only the center 6x6 is used
			 * TODO: set the z parameters too so that hw binning
			 * can clip primitives in Z too
			 */

			mul_x = 1.0f / (float) (gmem->bin_w * 8);
			mul_y = 1.0f / (float) (gmem->bin_h * 8);
			off_x = -pipe->x * (1.0/8.0f) + 0.125f - mul_x * gmem->minx;
			off_y = -pipe->y * (1.0/8.0f) + 0.125f - mul_y * gmem->miny;

			OUT_RING(ring, fui(off_x * (256.0f/255.0f)));
			OUT_RING(ring, fui(off_y * (256.0f/255.0f)));
			OUT_RING(ring, 0x3f000000); /* 0.5f */
			OUT_RING(ring, fui(0.0f));

			OUT_RING(ring, fui(mul_x * (256.0f/255.0f)));
			OUT_RING(ring, fui(mul_y * (256.0f/255.0f)));
			OUT_RING(ring, fui(0.0f));
			OUT_RING(ring, fui(0.0f));
		}

		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL));
		OUT_RING(ring, 0);

		fd2_emit_ib(ring, batch->binning);

		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL));
		OUT_RING(ring, 0x00000002);
	} else {
		patch_draws(batch, IGNORE_VISIBILITY);
	}

	util_dynarray_clear(&batch->draw_patches);
	util_dynarray_clear(&batch->shader_patches);
}

/* before mem2gmem */
static void
fd2_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_INFO));
	OUT_RING(ring, A2XX_RB_COLOR_INFO_SWAP(1) | /* RB_COLOR_INFO */
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format)));

	/* setup screen scissor for current tile (same for mem2gmem): */
	OUT_PKT3(ring, CP_SET_CONSTANT, 3);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_SCREEN_SCISSOR_TL));
	OUT_RING(ring, A2XX_PA_SC_SCREEN_SCISSOR_TL_X(0) |
			A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(0));
	OUT_RING(ring, A2XX_PA_SC_SCREEN_SCISSOR_BR_X(tile->bin_w) |
			A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(tile->bin_h));
}

/* before IB to rendering cmds: */
static void
fd2_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
	struct fd_context *ctx = batch->ctx;
	struct fd2_context *fd2_ctx = fd2_context(ctx);
	struct fd_ringbuffer *ring = batch->gmem;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);

	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COLOR_INFO));
	OUT_RING(ring, A2XX_RB_COLOR_INFO_SWAP(fmt2swap(format)) |
			A2XX_RB_COLOR_INFO_FORMAT(fd2_pipe2color(format)));

	/* setup window scissor and offset for current tile (different
	 * from mem2gmem):
	 */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_PA_SC_WINDOW_OFFSET));
	OUT_RING(ring, A2XX_PA_SC_WINDOW_OFFSET_X(-tile->xoff) |
			A2XX_PA_SC_WINDOW_OFFSET_Y(-tile->yoff));

	/* write SCISSOR_BR to memory so fast clear path can restore from it */
	OUT_PKT3(ring, CP_MEM_WRITE, 2);
	OUT_RELOC(ring, fd_resource(fd2_ctx->solid_vertexbuf)->bo, 60, 0, 0);
	OUT_RING(ring, A2XX_PA_SC_SCREEN_SCISSOR_BR_X(tile->bin_w) |
			A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(tile->bin_h));

	/* set the copy offset for gmem2mem */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A2XX_RB_COPY_DEST_OFFSET));
	OUT_RING(ring, A2XX_RB_COPY_DEST_OFFSET_X(tile->xoff) |
			A2XX_RB_COPY_DEST_OFFSET_Y(tile->yoff));

	/* tile offset for gl_FragCoord on a20x (C64 in fragment shader) */
	if (is_a20x(ctx->screen)) {
		OUT_PKT3(ring, CP_SET_CONSTANT, 5);
		OUT_RING(ring, 0x00000580);
		OUT_RING(ring, fui(tile->xoff));
		OUT_RING(ring, fui(tile->yoff));
		OUT_RING(ring, fui(0.0f));
		OUT_RING(ring, fui(0.0f));
	}

	if (use_hw_binning(batch)) {
		struct fd_bo *pipe_bo = ctx->vsc_pipe_bo[tile->p];

		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_CURRENT_BIN_ID_MIN));
		OUT_RING(ring, tile->n);

		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A2XX_VGT_CURRENT_BIN_ID_MAX));
		OUT_RING(ring, tile->n);

		/* TODO only emit this when tile->p changes */
		OUT_PKT3(ring, CP_SET_DRAW_INIT_FLAGS, 1);
		OUT_RELOC(ring, pipe_bo, 0, 0, 0);
	}
}

void
fd2_gmem_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);

	ctx->emit_sysmem_prep = fd2_emit_sysmem_prep;
	ctx->emit_tile_init = fd2_emit_tile_init;
	ctx->emit_tile_prep = fd2_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fd2_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fd2_emit_tile_renderprep;
	ctx->emit_tile_gmem2mem = fd2_emit_tile_gmem2mem;
}