radeonsi: switch radeon_add_to_buffer_list parameter to si_context
[mesa.git] src/gallium/drivers/radeonsi/si_state_viewport.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"
#include "util/u_viewport.h"
#include "tgsi/tgsi_scan.h"

#define SI_MAX_SCISSOR 16384

static void si_set_scissor_states(struct pipe_context *pctx,
				  unsigned start_slot,
				  unsigned num_scissors,
				  const struct pipe_scissor_state *state)
{
	struct si_context *ctx = (struct si_context *)pctx;
	int i;

	for (i = 0; i < num_scissors; i++)
		ctx->scissors.states[start_slot + i] = state[i];

	if (!ctx->queued.named.rasterizer ||
	    !ctx->queued.named.rasterizer->scissor_enable)
		return;

	ctx->scissors.dirty_mask |= ((1 << num_scissors) - 1) << start_slot;
	si_mark_atom_dirty(ctx, &ctx->scissors.atom);
}

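/* Dirty-mask convention used throughout this file: each scissor, viewport
 * and depth-range slot owns one bit. The set_* entry points set bits and
 * the emit functions clear exactly the bits they flush to the command
 * stream, so slots that have not changed are never re-emitted. */
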
/* Since the guard band disables clipping, we have to clip per-pixel
 * using a scissor.
 */
static void si_get_scissor_from_viewport(struct si_context *ctx,
					 const struct pipe_viewport_state *vp,
					 struct si_signed_scissor *scissor)
{
	float tmp, minx, miny, maxx, maxy;

	/* Convert (-1, -1) and (1, 1) from clip space into window space. */
	minx = -vp->scale[0] + vp->translate[0];
	miny = -vp->scale[1] + vp->translate[1];
	maxx = vp->scale[0] + vp->translate[0];
	maxy = vp->scale[1] + vp->translate[1];

	/* Handle inverted viewports. */
	if (minx > maxx) {
		tmp = minx;
		minx = maxx;
		maxx = tmp;
	}
	if (miny > maxy) {
		tmp = miny;
		miny = maxy;
		maxy = tmp;
	}

	/* Convert to integer and round up the max bounds. */
	scissor->minx = minx;
	scissor->miny = miny;
	scissor->maxx = ceilf(maxx);
	scissor->maxy = ceilf(maxy);
}

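/* Worked example for the conversion above: a 1920x1080 viewport anchored
 * at the origin has scale = (960, 540) and translate = (960, 540), so
 * x_window = x_clip * scale[0] + translate[0] (likewise for y); clip
 * (-1, -1) maps to window (0, 0) and clip (1, 1) maps to (1920, 1080). */
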
static void si_clamp_scissor(struct si_context *ctx,
			     struct pipe_scissor_state *out,
			     struct si_signed_scissor *scissor)
{
	out->minx = CLAMP(scissor->minx, 0, SI_MAX_SCISSOR);
	out->miny = CLAMP(scissor->miny, 0, SI_MAX_SCISSOR);
	out->maxx = CLAMP(scissor->maxx, 0, SI_MAX_SCISSOR);
	out->maxy = CLAMP(scissor->maxy, 0, SI_MAX_SCISSOR);
}

static void si_clip_scissor(struct pipe_scissor_state *out,
			    struct pipe_scissor_state *clip)
{
	out->minx = MAX2(out->minx, clip->minx);
	out->miny = MAX2(out->miny, clip->miny);
	out->maxx = MIN2(out->maxx, clip->maxx);
	out->maxy = MIN2(out->maxy, clip->maxy);
}

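/* Grow "out" to also cover "in". This is used below to size the guard
 * band so that it is valid for every viewport when the VS selects the
 * viewport index at run time. */
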
static void si_scissor_make_union(struct si_signed_scissor *out,
				  struct si_signed_scissor *in)
{
	out->minx = MIN2(out->minx, in->minx);
	out->miny = MIN2(out->miny, in->miny);
	out->maxx = MAX2(out->maxx, in->maxx);
	out->maxy = MAX2(out->maxy, in->maxy);
}

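/* Write one PA_SC_VPORT_SCISSOR_*_TL/BR register pair: clamp the signed
 * viewport-derived scissor to the hardware range, then intersect it with
 * the user scissor if scissoring is enabled. */
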
static void si_emit_one_scissor(struct si_context *ctx,
				struct radeon_winsys_cs *cs,
				struct si_signed_scissor *vp_scissor,
				struct pipe_scissor_state *scissor)
{
	struct pipe_scissor_state final;

	if (ctx->vs_disables_clipping_viewport) {
		final.minx = final.miny = 0;
		final.maxx = final.maxy = SI_MAX_SCISSOR;
	} else {
		si_clamp_scissor(ctx, &final, vp_scissor);
	}

	if (scissor)
		si_clip_scissor(&final, scissor);

	radeon_emit(cs, S_028250_TL_X(final.minx) |
			S_028250_TL_Y(final.miny) |
			S_028250_WINDOW_OFFSET_DISABLE(1));
	radeon_emit(cs, S_028254_BR_X(final.maxx) |
			S_028254_BR_Y(final.maxy));
}

/* The supported viewport range is [-MAX, MAX]. */
#define GET_MAX_VIEWPORT_RANGE(rctx) (32768)
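/* Note: the macro ignores its argument and always returns the same
 * +/-32K window-coordinate limit. */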

static void si_emit_guardband(struct si_context *ctx,
			      struct si_signed_scissor *vp_as_scissor)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
	struct pipe_viewport_state vp;
	float left, top, right, bottom, max_range, guardband_x, guardband_y;
	float discard_x, discard_y;

	/* Reconstruct the viewport transformation from the scissor. */
	vp.translate[0] = (vp_as_scissor->minx + vp_as_scissor->maxx) / 2.0;
	vp.translate[1] = (vp_as_scissor->miny + vp_as_scissor->maxy) / 2.0;
	vp.scale[0] = vp_as_scissor->maxx - vp.translate[0];
	vp.scale[1] = vp_as_scissor->maxy - vp.translate[1];

	/* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
	if (vp_as_scissor->minx == vp_as_scissor->maxx)
		vp.scale[0] = 0.5;
	if (vp_as_scissor->miny == vp_as_scissor->maxy)
		vp.scale[1] = 0.5;

	/* Find the biggest guard band that is inside the supported viewport
	 * range. The guard band is specified as a horizontal and vertical
	 * distance from (0,0) in clip space.
	 *
	 * This is done by applying the inverse viewport transformation
	 * on the viewport limits to get those limits in clip space.
	 *
	 * Use a limit one pixel smaller to allow for some precision error.
	 */
	max_range = GET_MAX_VIEWPORT_RANGE(ctx) - 1;
	left   = (-max_range - vp.translate[0]) / vp.scale[0];
	right  = ( max_range - vp.translate[0]) / vp.scale[0];
	top    = (-max_range - vp.translate[1]) / vp.scale[1];
	bottom = ( max_range - vp.translate[1]) / vp.scale[1];

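	/* Example: a 1920-pixel-wide viewport at x = 0 has translate[0] = 960
	 * and scale[0] = 960, so left = (-32767 - 960) / 960 ~= -35.1 and
	 * right = (32767 - 960) / 960 ~= 33.1, i.e. the guard band may extend
	 * about 33 clip-space units on X before exceeding the hw range. */
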
	assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);

	guardband_x = MIN2(-left, right);
	guardband_y = MIN2(-top, bottom);

	discard_x = 1.0;
	discard_y = 1.0;

	if (unlikely(ctx->current_rast_prim < PIPE_PRIM_TRIANGLES) &&
	    ctx->queued.named.rasterizer) {
		/* When rendering wide points or lines, we need to be more
		 * conservative about when to discard them entirely. */
		const struct si_state_rasterizer *rs = ctx->queued.named.rasterizer;
		float pixels;

		if (ctx->current_rast_prim == PIPE_PRIM_POINTS)
			pixels = rs->max_point_size;
		else
			pixels = rs->line_width;

		/* Add half the point size / line width, converted from
		 * pixels to clip-space units by the viewport scale. */
		discard_x += pixels / (2.0 * vp.scale[0]);
		discard_y += pixels / (2.0 * vp.scale[1]);

		/* Discard primitives that would lie entirely outside the clip
		 * region. */
		discard_x = MIN2(discard_x, guardband_x);
		discard_y = MIN2(discard_y, guardband_y);
	}

	/* If any of the GB registers is updated, all of them must be updated. */
	radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);

	radeon_emit(cs, fui(guardband_y)); /* R_028BE8_PA_CL_GB_VERT_CLIP_ADJ */
	radeon_emit(cs, fui(discard_y));   /* R_028BEC_PA_CL_GB_VERT_DISC_ADJ */
	radeon_emit(cs, fui(guardband_x)); /* R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ */
	radeon_emit(cs, fui(discard_x));   /* R_028BF4_PA_CL_GB_HORZ_DISC_ADJ */
}

static void si_emit_scissors(struct si_context *ctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
	struct pipe_scissor_state *states = ctx->scissors.states;
	unsigned mask = ctx->scissors.dirty_mask;
	bool scissor_enabled = false;
	struct si_signed_scissor max_vp_scissor;
	int i;

	if (ctx->queued.named.rasterizer)
		scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;

	/* The simple case: Only 1 viewport is active. */
	if (!ctx->vs_writes_viewport_index) {
		struct si_signed_scissor *vp = &ctx->viewports.as_scissor[0];

		if (!(mask & 1))
			return;

		radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
		si_emit_one_scissor(ctx, cs, vp, scissor_enabled ? &states[0] : NULL);
		si_emit_guardband(ctx, vp);
		ctx->scissors.dirty_mask &= ~1; /* clear one bit */
		return;
	}

	/* Shaders can draw to any viewport. Make a union of all viewports. */
	max_vp_scissor = ctx->viewports.as_scissor[0];
	for (i = 1; i < SI_MAX_VIEWPORTS; i++)
		si_scissor_make_union(&max_vp_scissor,
				      &ctx->viewports.as_scissor[i]);

	while (mask) {
		int start, count, i;

		u_bit_scan_consecutive_range(&mask, &start, &count);

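		/* Each scissor slot occupies 2 registers (TL, BR) of 4 bytes
		 * each, hence the start * 4 * 2 byte offset from slot 0. */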
		radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL +
					       start * 4 * 2, count * 2);
		for (i = start; i < start+count; i++) {
			si_emit_one_scissor(ctx, cs, &ctx->viewports.as_scissor[i],
					    scissor_enabled ? &states[i] : NULL);
		}
	}
	si_emit_guardband(ctx, &max_vp_scissor);
	ctx->scissors.dirty_mask = 0;
}

static void si_set_viewport_states(struct pipe_context *pctx,
				   unsigned start_slot,
				   unsigned num_viewports,
				   const struct pipe_viewport_state *state)
{
	struct si_context *ctx = (struct si_context *)pctx;
	unsigned mask;
	int i;

	for (i = 0; i < num_viewports; i++) {
		unsigned index = start_slot + i;

		ctx->viewports.states[index] = state[i];
		si_get_scissor_from_viewport(ctx, &state[i],
					     &ctx->viewports.as_scissor[index]);
	}

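	/* A viewport change also invalidates the derived guard-band scissors
	 * and depth ranges, so all three dirty masks are updated. */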
	mask = ((1 << num_viewports) - 1) << start_slot;
	ctx->viewports.dirty_mask |= mask;
	ctx->viewports.depth_range_dirty_mask |= mask;
	ctx->scissors.dirty_mask |= mask;
	si_mark_atom_dirty(ctx, &ctx->viewports.atom);
	si_mark_atom_dirty(ctx, &ctx->scissors.atom);
}

static void si_emit_one_viewport(struct si_context *ctx,
				 struct pipe_viewport_state *state)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;

	radeon_emit(cs, fui(state->scale[0]));
	radeon_emit(cs, fui(state->translate[0]));
	radeon_emit(cs, fui(state->scale[1]));
	radeon_emit(cs, fui(state->translate[1]));
	radeon_emit(cs, fui(state->scale[2]));
	radeon_emit(cs, fui(state->translate[2]));
}

static void si_emit_viewports(struct si_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
	struct pipe_viewport_state *states = ctx->viewports.states;
	unsigned mask = ctx->viewports.dirty_mask;

	/* The simple case: Only 1 viewport is active. */
	if (!ctx->vs_writes_viewport_index) {
		if (!(mask & 1))
			return;

		radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
		si_emit_one_viewport(ctx, &states[0]);
		ctx->viewports.dirty_mask &= ~1; /* clear one bit */
		return;
	}

	while (mask) {
		int start, count, i;

		u_bit_scan_consecutive_range(&mask, &start, &count);

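		/* Each viewport slot occupies 6 registers (XSCALE through
		 * ZOFFSET) of 4 bytes each, hence the start * 4 * 6 offset. */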
		radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
					       start * 4 * 6, count * 6);
		for (i = start; i < start+count; i++)
			si_emit_one_viewport(ctx, &states[i]);
	}
	ctx->viewports.dirty_mask = 0;
}

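/* Compute the depth range [zmin, zmax] covered by a viewport. A VS that
 * outputs window-space positions bypasses the viewport transform, so the
 * full [0, 1] range is used; otherwise the util helper derives the range
 * from the viewport's Z scale/translate, honoring clip_halfz (clip-space
 * Z in [0, 1] rather than [-1, 1]). */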
static inline void
si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
		      bool window_space_position, float *zmin, float *zmax)
{
	if (window_space_position) {
		*zmin = 0;
		*zmax = 1;
		return;
	}
	util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}

static void si_emit_depth_ranges(struct si_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
	struct pipe_viewport_state *states = ctx->viewports.states;
	unsigned mask = ctx->viewports.depth_range_dirty_mask;
	bool clip_halfz = false;
	bool window_space = ctx->vs_disables_clipping_viewport;
	float zmin, zmax;

	if (ctx->queued.named.rasterizer)
		clip_halfz = ctx->queued.named.rasterizer->clip_halfz;

	/* The simple case: Only 1 viewport is active. */
	if (!ctx->vs_writes_viewport_index) {
		if (!(mask & 1))
			return;

		si_viewport_zmin_zmax(&states[0], clip_halfz, window_space,
				      &zmin, &zmax);

		radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
		radeon_emit(cs, fui(zmin));
		radeon_emit(cs, fui(zmax));
		ctx->viewports.depth_range_dirty_mask &= ~1; /* clear one bit */
		return;
	}

	while (mask) {
		int start, count, i;

		u_bit_scan_consecutive_range(&mask, &start, &count);

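		/* ZMIN/ZMAX form 2 registers of 4 bytes per viewport slot,
		 * hence the start * 4 * 2 offset. */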
		radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
					       start * 4 * 2, count * 2);
		for (i = start; i < start+count; i++) {
			si_viewport_zmin_zmax(&states[i], clip_halfz, window_space,
					      &zmin, &zmax);
			radeon_emit(cs, fui(zmin));
			radeon_emit(cs, fui(zmax));
		}
	}
	ctx->viewports.depth_range_dirty_mask = 0;
}

static void si_emit_viewport_states(struct si_context *ctx,
				    struct r600_atom *atom)
{
	si_emit_viewports(ctx);
	si_emit_depth_ranges(ctx);
}

/**
 * This reacts to 2 state changes:
 * - VS.writes_viewport_index
 * - VS output position in window space (enable/disable)
 *
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
void si_update_vs_viewport_state(struct si_context *ctx)
{
	struct tgsi_shader_info *info = si_get_vs_info(ctx);
	bool vs_window_space;

	if (!info)
		return;

	/* Set when the VS disables clipping and the viewport transformation. */
	vs_window_space =
		info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];

	if (ctx->vs_disables_clipping_viewport != vs_window_space) {
		ctx->vs_disables_clipping_viewport = vs_window_space;
		ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
		ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
		si_mark_atom_dirty(ctx, &ctx->scissors.atom);
		si_mark_atom_dirty(ctx, &ctx->viewports.atom);
	}

	/* Viewport index handling. */
	ctx->vs_writes_viewport_index = info->writes_viewport_index;
	if (!ctx->vs_writes_viewport_index)
		return;

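	/* The VS now writes VIEWPORT_INDEX: flush any viewports and scissors
	 * whose emission was deferred while only slot 0 was in use. */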
	if (ctx->scissors.dirty_mask)
		si_mark_atom_dirty(ctx, &ctx->scissors.atom);

	if (ctx->viewports.dirty_mask ||
	    ctx->viewports.depth_range_dirty_mask)
		si_mark_atom_dirty(ctx, &ctx->viewports.atom);
}

void si_init_viewport_functions(struct si_context *ctx)
{
	ctx->scissors.atom.emit = si_emit_scissors;
	ctx->viewports.atom.emit = si_emit_viewport_states;

	ctx->b.b.set_scissor_states = si_set_scissor_states;
	ctx->b.b.set_viewport_states = si_set_viewport_states;
}