r600g: fix multi buffer rendering
[mesa.git] / src / gallium / drivers / r600 / r600_state2.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 /* TODO:
25 * - fix mask for depth control & cull for query
26 */
27 #include <stdio.h>
28 #include <errno.h>
29 #include <pipe/p_defines.h>
30 #include <pipe/p_state.h>
31 #include <pipe/p_context.h>
32 #include <tgsi/tgsi_scan.h>
33 #include <tgsi/tgsi_parse.h>
34 #include <tgsi/tgsi_util.h>
35 #include <util/u_blitter.h>
36 #include <util/u_double_list.h>
37 #include <util/u_transfer.h>
38 #include <util/u_surface.h>
39 #include <util/u_pack_color.h>
40 #include <util/u_memory.h>
41 #include <util/u_inlines.h>
42 #include <pipebuffer/pb_buffer.h>
43 #include "r600.h"
44 #include "r600d.h"
45 #include "r700_sq.h"
46 struct radeon_state {
47 unsigned dummy;
48 };
49 #include "r600_resource.h"
50 #include "r600_shader.h"
51
52
53 uint32_t r600_translate_texformat(enum pipe_format format,
54 const unsigned char *swizzle_view,
55 uint32_t *word4_p, uint32_t *yuv_format_p);
56
57 #include "r600_state_inlines.h"
58
59 enum r600_pipe_state_id {
60 R600_PIPE_STATE_BLEND = 0,
61 R600_PIPE_STATE_BLEND_COLOR,
62 R600_PIPE_STATE_CONFIG,
63 R600_PIPE_STATE_CLIP,
64 R600_PIPE_STATE_SCISSOR,
65 R600_PIPE_STATE_VIEWPORT,
66 R600_PIPE_STATE_RASTERIZER,
67 R600_PIPE_STATE_VGT,
68 R600_PIPE_STATE_FRAMEBUFFER,
69 R600_PIPE_STATE_DSA,
70 R600_PIPE_STATE_STENCIL_REF,
71 R600_PIPE_STATE_PS_SHADER,
72 R600_PIPE_STATE_VS_SHADER,
73 R600_PIPE_STATE_CONSTANT,
74 R600_PIPE_STATE_SAMPLER,
75 R600_PIPE_STATE_RESOURCE,
76 R600_PIPE_NSTATES
77 };
78
79 struct r600_screen {
80 struct pipe_screen screen;
81 struct radeon *radeon;
82 };
83
84 struct r600_pipe_sampler_view {
85 struct pipe_sampler_view base;
86 struct r600_pipe_state state;
87 };
88
89 struct r600_pipe_rasterizer {
90 struct r600_pipe_state rstate;
91 bool flatshade;
92 unsigned sprite_coord_enable;
93 };
94
95 struct r600_pipe_blend {
96 struct r600_pipe_state rstate;
97 unsigned cb_target_mask;
98 };
99
100 struct r600_pipe_shader {
101 struct r600_shader shader;
102 struct r600_pipe_state rstate;
103 struct radeon_ws_bo *bo;
104 };
105
106 struct r600_vertex_element
107 {
108 unsigned count;
109 unsigned refcount;
110 struct pipe_vertex_element elements[32];
111 };
112
113 struct r600_pipe_context {
114 struct pipe_context context;
115 struct r600_screen *screen;
116 struct radeon *radeon;
117 struct blitter_context *blitter;
118 struct r600_pipe_state *states[R600_PIPE_NSTATES];
119 struct r600_context ctx;
120 struct r600_vertex_element *vertex_elements;
121 struct pipe_framebuffer_state framebuffer;
122 struct pipe_index_buffer index_buffer;
123 struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
124 unsigned nvertex_buffer;
125 unsigned cb_target_mask;
126 /* for saving when using blitter */
127 struct pipe_stencil_ref stencil_ref;
128 struct pipe_viewport_state viewport;
129 struct pipe_clip_state clip;
130 unsigned vs_nconst;
131 unsigned ps_nconst;
132 struct r600_pipe_state vs_const[256];
133 struct r600_pipe_state ps_const[256];
134 struct r600_pipe_state vs_resource[160];
135 struct r600_pipe_state ps_resource[160];
136 struct r600_pipe_state config;
137 struct r600_pipe_shader *ps_shader;
138 struct r600_pipe_shader *vs_shader;
139 /* shader information */
140 bool ps_rebuild;
141 bool vs_rebuild;
142 unsigned sprite_coord_enable;
143 bool flatshade;
144 };
145
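	/* convert a float to unsigned fixed point with frac_bits fractional bits (value * 2^frac_bits) */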
146 static INLINE u32 S_FIXED(float value, u32 frac_bits)
147 {
148 return value * (1 << frac_bits);
149 }
150
151 /* r600_shader.c */
152 static void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader)
153 {
154 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
155 struct r600_pipe_state *rstate = &shader->rstate;
156 struct r600_shader *rshader = &shader->shader;
157 unsigned spi_vs_out_id[10];
158 unsigned i, tmp;
159
160 /* clear previous registers */
161 rstate->nregs = 0;
162
163 /* so far we never get proper semantic ids from TGSI */
164 for (i = 0; i < 10; i++) {
165 spi_vs_out_id[i] = 0;
166 }
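	/* pack an identity semantic mapping: id i goes into byte (i & 3) of SPI_VS_OUT_ID dword i / 4 */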
167 for (i = 0; i < 32; i++) {
168 tmp = i << ((i & 3) * 8);
169 spi_vs_out_id[i / 4] |= tmp;
170 }
171 for (i = 0; i < 10; i++) {
172 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
173 R_028614_SPI_VS_OUT_ID_0 + i * 4,
174 spi_vs_out_id[i], 0xFFFFFFFF, NULL);
175 }
176
177 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
178 R_0286C4_SPI_VS_OUT_CONFIG,
179 S_0286C4_VS_EXPORT_COUNT(rshader->noutput - 2),
180 0xFFFFFFFF, NULL);
181 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
182 R_028868_SQ_PGM_RESOURCES_VS,
183 S_028868_NUM_GPRS(rshader->bc.ngpr) |
184 S_028868_STACK_SIZE(rshader->bc.nstack),
185 0xFFFFFFFF, NULL);
186 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
187 R_0288A4_SQ_PGM_RESOURCES_FS,
188 0x00000000, 0xFFFFFFFF, NULL);
189 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
190 R_0288D0_SQ_PGM_CF_OFFSET_VS,
191 0x00000000, 0xFFFFFFFF, NULL);
192 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
193 R_0288DC_SQ_PGM_CF_OFFSET_FS,
194 0x00000000, 0xFFFFFFFF, NULL);
195 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
196 R_028858_SQ_PGM_START_VS,
197 0x00000000, 0xFFFFFFFF, shader->bo);
198 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
199 R_028894_SQ_PGM_START_FS,
200 0x00000000, 0xFFFFFFFF, shader->bo);
201 rctx->vs_rebuild = FALSE;
202 }
203
204 static void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader)
205 {
206 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
207 struct r600_pipe_state *rstate = &shader->rstate;
208 struct r600_shader *rshader = &shader->shader;
209 unsigned i, tmp, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z;
210 boolean have_pos = FALSE;
211
212 /* clear previous registers */
213 rstate->nregs = 0;
214
215 for (i = 0; i < rshader->ninput; i++) {
216 tmp = S_028644_SEMANTIC(i);
217 tmp |= S_028644_SEL_CENTROID(1);
218 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
219 have_pos = TRUE;
220 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
221 rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
222 rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
223 tmp |= S_028644_FLAT_SHADE(rshader->flat_shade);
224 }
225 if (rctx->sprite_coord_enable & (1 << i)) {
226 tmp |= S_028644_PT_SPRITE_TEX(1);
227 }
228 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL);
229 }
230
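	/* build SQ_PGM_EXPORTS_PS: bit 0 flags a Z (depth) export, the color output count goes into EXPORT_COLORS */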
231 exports_ps = 0;
232 num_cout = 0;
233 for (i = 0; i < rshader->noutput; i++) {
234 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION)
235 exports_ps |= 1;
236 else if (rshader->output[i].name == TGSI_SEMANTIC_COLOR) {
237 num_cout++;
238 }
239 }
240 exports_ps |= S_028854_EXPORT_COLORS(num_cout);
241 if (!exports_ps) {
242 /* always export at least one component per pixel */
243 exports_ps = 2;
244 }
245
246 spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) |
247 S_0286CC_PERSP_GRADIENT_ENA(1);
248 spi_input_z = 0;
249 if (have_pos) {
250 spi_ps_in_control_0 |= S_0286CC_POSITION_ENA(1) |
251 S_0286CC_BARYC_SAMPLE_CNTL(1);
252 spi_input_z |= 1;
253 }
254 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286CC_SPI_PS_IN_CONTROL_0, spi_ps_in_control_0, 0xFFFFFFFF, NULL);
255 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286D0_SPI_PS_IN_CONTROL_1, 0x00000000, 0xFFFFFFFF, NULL);
256 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286D8_SPI_INPUT_Z, spi_input_z, 0xFFFFFFFF, NULL);
257 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
258 R_028840_SQ_PGM_START_PS,
259 0x00000000, 0xFFFFFFFF, shader->bo);
260 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
261 R_028850_SQ_PGM_RESOURCES_PS,
262 S_028868_NUM_GPRS(rshader->bc.ngpr) |
263 S_028868_STACK_SIZE(rshader->bc.nstack),
264 0xFFFFFFFF, NULL);
265 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
266 R_028854_SQ_PGM_EXPORTS_PS,
267 exports_ps, 0xFFFFFFFF, NULL);
268 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
269 R_0288CC_SQ_PGM_CF_OFFSET_PS,
270 0x00000000, 0xFFFFFFFF, NULL);
271 rctx->ps_rebuild = FALSE;
272 }
273
274 static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader)
275 {
276 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
277 struct r600_shader *rshader = &shader->shader;
278 void *ptr;
279
280 /* upload the new shader bytecode into its buffer object */
281 if (shader->bo == NULL) {
282 shader->bo = radeon_ws_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, 0);
283 if (shader->bo == NULL) {
284 return -ENOMEM;
285 }
286 ptr = radeon_ws_bo_map(rctx->radeon, shader->bo, 0, NULL);
287 memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * 4);
288 radeon_ws_bo_unmap(rctx->radeon, shader->bo);
289 }
290 /* build state */
291 rshader->flat_shade = rctx->flatshade;
292 switch (rshader->processor_type) {
293 case TGSI_PROCESSOR_VERTEX:
294 r600_pipe_shader_vs(ctx, shader);
295 break;
296 case TGSI_PROCESSOR_FRAGMENT:
297 r600_pipe_shader_ps(ctx, shader);
298 break;
299 default:
300 return -EINVAL;
301 }
302 r600_context_pipe_state_set(&rctx->ctx, &shader->rstate);
303 return 0;
304 }
305
306 static int r600_shader_update(struct pipe_context *ctx, struct r600_pipe_shader *rshader)
307 {
308 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
309 struct r600_shader *shader = &rshader->shader;
310 const struct util_format_description *desc;
311 enum pipe_format resource_format[160];
312 unsigned i, nresources = 0;
313 struct r600_bc *bc = &shader->bc;
314 struct r600_bc_cf *cf;
315 struct r600_bc_vtx *vtx;
316
317 if (shader->processor_type != TGSI_PROCESSOR_VERTEX)
318 return 0;
319 for (i = 0; i < rctx->vertex_elements->count; i++) {
320 resource_format[nresources++] = rctx->vertex_elements->elements[i].src_format;
321 }
322 radeon_ws_bo_reference(rctx->radeon, &rshader->bo, NULL);
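	/* patch the destination swizzles of every vertex fetch clause so they match the currently bound vertex element formats */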
323 LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
324 switch (cf->inst) {
325 case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
326 case V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC:
327 LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) {
328 desc = util_format_description(resource_format[vtx->buffer_id]);
329 if (desc == NULL) {
330 R600_ERR("unknown format %d\n", resource_format[vtx->buffer_id]);
331 return -EINVAL;
332 }
333 vtx->dst_sel_x = desc->swizzle[0];
334 vtx->dst_sel_y = desc->swizzle[1];
335 vtx->dst_sel_z = desc->swizzle[2];
336 vtx->dst_sel_w = desc->swizzle[3];
337 }
338 break;
339 default:
340 break;
341 }
342 }
343 return r600_bc_build(&shader->bc);
344 }
345
346 static int r600_pipe_shader_update2(struct pipe_context *ctx, struct r600_pipe_shader *shader)
347 {
348 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
349 int r;
350
351 if (shader == NULL)
352 return -EINVAL;
353 if (shader->bo) {
354 switch (shader->shader.processor_type) {
355 case TGSI_PROCESSOR_VERTEX:
356 if (!rctx->vs_rebuild)
357 return 0;
358 break;
359 case TGSI_PROCESSOR_FRAGMENT:
360 if (!rctx->ps_rebuild)
361 return 0;
362 break;
363 default:
364 return -EINVAL;
365 }
366 }
367 /* there must be enough vertex elements to cover every fetch resource used by the shader */
368 if (rctx->vertex_elements->count < shader->shader.bc.nresource) {
369 R600_ERR("%d resources provided, expecting %d\n",
370 rctx->vertex_elements->count, shader->shader.bc.nresource);
371 return -EINVAL;
372 }
373 r = r600_shader_update(ctx, shader);
374 if (r)
375 return r;
376 return r600_pipe_shader(ctx, shader);
377 }
378
379 int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
380 static int r600_pipe_shader_create2(struct pipe_context *ctx, struct r600_pipe_shader *shader, const struct tgsi_token *tokens)
381 {
382 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
383 int r;
384
385 //fprintf(stderr, "--------------------------------------------------------------\n");
386 //tgsi_dump(tokens, 0);
387 shader->shader.family = r600_get_family(rctx->radeon);
388 r = r600_shader_from_tgsi(tokens, &shader->shader);
389 if (r) {
390 R600_ERR("translation from TGSI failed !\n");
391 return r;
392 }
393 r = r600_bc_build(&shader->shader.bc);
394 if (r) {
395 R600_ERR("building bytecode failed !\n");
396 return r;
397 }
398 //fprintf(stderr, "______________________________________________________________\n");
399 return 0;
400 }
401 /* r600_shader.c END */
402
403 static const char* r600_get_vendor(struct pipe_screen* pscreen)
404 {
405 return "X.Org";
406 }
407
408 static const char* r600_get_name(struct pipe_screen* pscreen)
409 {
410 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
411 enum radeon_family family = r600_get_family(rscreen->radeon);
412
413 if (family >= CHIP_R600 && family < CHIP_RV770)
414 return "R600 (HD2XXX,HD3XXX)";
415 else
416 return "R700 (HD4XXX)";
417 }
418
419 static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
420 {
421 switch (param) {
422 /* Supported features (boolean caps). */
423 case PIPE_CAP_NPOT_TEXTURES:
424 case PIPE_CAP_TWO_SIDED_STENCIL:
425 case PIPE_CAP_GLSL:
426 case PIPE_CAP_DUAL_SOURCE_BLEND:
427 case PIPE_CAP_ANISOTROPIC_FILTER:
428 case PIPE_CAP_POINT_SPRITE:
429 case PIPE_CAP_OCCLUSION_QUERY:
430 case PIPE_CAP_TEXTURE_SHADOW_MAP:
431 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
432 case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
433 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
434 case PIPE_CAP_SM3:
435 case PIPE_CAP_TEXTURE_SWIZZLE:
436 case PIPE_CAP_INDEP_BLEND_ENABLE:
437 case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
438 case PIPE_CAP_DEPTH_CLAMP:
439 return 1;
440
441 /* Unsupported features (boolean caps). */
442 case PIPE_CAP_TIMER_QUERY:
443 case PIPE_CAP_STREAM_OUTPUT:
444 case PIPE_CAP_INDEP_BLEND_FUNC: /* FIXME allow this */
445 return 0;
446
447 /* Texturing. */
448 case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
449 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
450 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
451 return 14;
452 case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
453 /* FIXME allow this once infrastructure is there */
454 return 0;
455 case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
456 case PIPE_CAP_MAX_COMBINED_SAMPLERS:
457 return 16;
458
459 /* Render targets. */
460 case PIPE_CAP_MAX_RENDER_TARGETS:
461 /* FIXME some r6xx are buggy and can only do 4 */
462 return 8;
463
464 /* Fragment coordinate conventions. */
465 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
466 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
467 return 1;
468 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
469 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
470 return 0;
471
472 default:
473 R600_ERR("r600: unknown param %d\n", param);
474 return 0;
475 }
476 }
477
478 static float r600_get_paramf(struct pipe_screen* pscreen, enum pipe_cap param)
479 {
480 switch (param) {
481 case PIPE_CAP_MAX_LINE_WIDTH:
482 case PIPE_CAP_MAX_LINE_WIDTH_AA:
483 case PIPE_CAP_MAX_POINT_WIDTH:
484 case PIPE_CAP_MAX_POINT_WIDTH_AA:
485 return 8192.0f;
486 case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
487 return 16.0f;
488 case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
489 return 16.0f;
490 default:
491 R600_ERR("r600: unsupported paramf %d\n", param);
492 return 0.0f;
493 }
494 }
495
496 static boolean r600_is_format_supported(struct pipe_screen* screen,
497 enum pipe_format format,
498 enum pipe_texture_target target,
499 unsigned sample_count,
500 unsigned usage,
501 unsigned geom_flags)
502 {
503 unsigned retval = 0;
504 if (target >= PIPE_MAX_TEXTURE_TYPES) {
505 R600_ERR("r600: unsupported texture type %d\n", target);
506 return FALSE;
507 }
508
509 /* Multisample */
510 if (sample_count > 1)
511 return FALSE;
512
513 if ((usage & PIPE_BIND_SAMPLER_VIEW) &&
514 r600_is_sampler_format_supported(format)) {
515 retval |= PIPE_BIND_SAMPLER_VIEW;
516 }
517
518 if ((usage & (PIPE_BIND_RENDER_TARGET |
519 PIPE_BIND_DISPLAY_TARGET |
520 PIPE_BIND_SCANOUT |
521 PIPE_BIND_SHARED)) &&
522 r600_is_colorbuffer_format_supported(format)) {
523 retval |= usage &
524 (PIPE_BIND_RENDER_TARGET |
525 PIPE_BIND_DISPLAY_TARGET |
526 PIPE_BIND_SCANOUT |
527 PIPE_BIND_SHARED);
528 }
529
530 if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
531 r600_is_zs_format_supported(format)) {
532 retval |= PIPE_BIND_DEPTH_STENCIL;
533 }
534
535 if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
536 r600_is_vertex_format_supported(format))
537 retval |= PIPE_BIND_VERTEX_BUFFER;
538
539 if (usage & PIPE_BIND_TRANSFER_READ)
540 retval |= PIPE_BIND_TRANSFER_READ;
541 if (usage & PIPE_BIND_TRANSFER_WRITE)
542 retval |= PIPE_BIND_TRANSFER_WRITE;
543
544 return retval == usage;
545 }
546
547 static void r600_destroy_screen(struct pipe_screen* pscreen)
548 {
549 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
550
551 if (rscreen == NULL)
552 return;
553 FREE(rscreen);
554 }
555
556 struct r600_drawl {
557 struct pipe_context *ctx;
558 unsigned mode;
559 unsigned start;
560 unsigned count;
561 unsigned index_size;
562 struct pipe_resource *index_buffer;
563 };
564
565 int r600_conv_pipe_prim(unsigned pprim, unsigned *prim);
566 static void r600_draw_common(struct r600_drawl *draw)
567 {
568 struct r600_pipe_context *rctx = (struct r600_pipe_context *)draw->ctx;
569 struct r600_pipe_state *rstate;
570 struct r600_resource *rbuffer;
571 unsigned i, j, offset, format, prim;
572 u32 vgt_dma_index_type, vgt_draw_initiator, mask;
573 struct pipe_vertex_buffer *vertex_buffer;
574 struct r600_draw rdraw;
575 struct r600_pipe_state vgt;
576
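	/* pick the VGT DMA index type (0 = 16-bit, 1 = 32-bit indices) and the draw initiator source (0 = DMA indices, 2 = auto-generated index) */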
577 switch (draw->index_size) {
578 case 2:
579 vgt_draw_initiator = 0;
580 vgt_dma_index_type = 0;
581 break;
582 case 4:
583 vgt_draw_initiator = 0;
584 vgt_dma_index_type = 1;
585 break;
586 case 0:
587 vgt_draw_initiator = 2;
588 vgt_dma_index_type = 0;
589 break;
590 default:
591 R600_ERR("unsupported index size %d\n", draw->index_size);
592 return;
593 }
594 if (r600_conv_pipe_prim(draw->mode, &prim))
595 return;
596
597 /* rebuild the vertex/pixel shaders if the vertex format or rasterizer state changed */
598 if (r600_pipe_shader_update2(&rctx->context, rctx->vs_shader))
599 return;
600 if (r600_pipe_shader_update2(&rctx->context, rctx->ps_shader))
601 return;
602
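	/* set up one vertex fetch resource per vertex element: base offset, size, stride and data format */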
603 for (i = 0 ; i < rctx->vertex_elements->count; i++) {
604 rstate = &rctx->vs_resource[i];
605 j = rctx->vertex_elements->elements[i].vertex_buffer_index;
606 vertex_buffer = &rctx->vertex_buffer[j];
607 rbuffer = (struct r600_resource*)vertex_buffer->buffer;
608 offset = rctx->vertex_elements->elements[i].src_offset + vertex_buffer->buffer_offset;
609 format = r600_translate_colorformat(rctx->vertex_elements->elements[i].src_format);
610 rstate->id = R600_PIPE_STATE_RESOURCE;
611 rstate->nregs = 0;
612
613 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038000_RESOURCE0_WORD0, offset, 0xFFFFFFFF, rbuffer->bo);
614 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038004_RESOURCE0_WORD1, rbuffer->size - offset - 1, 0xFFFFFFFF, NULL);
615 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE,
616 R_038008_RESOURCE0_WORD2,
617 S_038008_STRIDE(vertex_buffer->stride) |
618 S_038008_DATA_FORMAT(format),
619 0xFFFFFFFF, NULL);
620 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_03800C_RESOURCE0_WORD3, 0x00000000, 0xFFFFFFFF, NULL);
621 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038010_RESOURCE0_WORD4, 0x00000000, 0xFFFFFFFF, NULL);
622 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038014_RESOURCE0_WORD5, 0x00000000, 0xFFFFFFFF, NULL);
623 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038018_RESOURCE0_WORD6, 0xC0000000, 0xFFFFFFFF, NULL);
624 r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, i);
625 }
626
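	/* enable the 4 component write bits for each bound colorbuffer, so the blend state's CB_TARGET_MASK is limited to attached targets */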
627 mask = 0;
628 for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
629 mask |= (0xF << (i * 4));
630 }
631
632 vgt.id = R600_PIPE_STATE_VGT;
633 vgt.nregs = 0;
634 r600_pipe_state_add_reg(&vgt, R600_GROUP_CONFIG, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
635 r600_pipe_state_add_reg(&vgt, R600_GROUP_CONTEXT, R_028408_VGT_INDX_OFFSET, draw->start, 0xFFFFFFFF, NULL);
636 r600_pipe_state_add_reg(&vgt, R600_GROUP_CONTEXT, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
637 r600_context_pipe_state_set(&rctx->ctx, &vgt);
638
639 rdraw.vgt_num_indices = draw->count;
640 rdraw.vgt_num_instances = 1;
641 rdraw.vgt_index_type = vgt_dma_index_type;
642 rdraw.vgt_draw_initiator = vgt_draw_initiator;
643 rdraw.indices = NULL;
644 if (draw->index_buffer) {
645 rbuffer = (struct r600_resource*)draw->index_buffer;
646 rdraw.indices = rbuffer->bo;
647 }
648 r600_context_draw(&rctx->ctx, &rdraw);
649 }
650
651 static void r600_draw_vbo2(struct pipe_context *ctx, const struct pipe_draw_info *info)
652 {
653 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
654 struct r600_drawl draw;
655
656 assert(info->index_bias == 0);
657
658 draw.ctx = ctx;
659 draw.mode = info->mode;
660 draw.start = info->start;
661 draw.count = info->count;
662 if (info->indexed && rctx->index_buffer.buffer) {
663 draw.index_size = rctx->index_buffer.index_size;
664 draw.index_buffer = rctx->index_buffer.buffer;
665 assert(rctx->index_buffer.offset %
666 rctx->index_buffer.index_size == 0);
667 draw.start += rctx->index_buffer.offset /
668 rctx->index_buffer.index_size;
669 } else {
670 draw.index_size = 0;
671 draw.index_buffer = NULL;
672 }
673 r600_draw_common(&draw);
674 }
675
676 static void r600_flush2(struct pipe_context *ctx, unsigned flags,
677 struct pipe_fence_handle **fence)
678 {
679 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
680 #if 1
681 static int dc = 0;
682 char dname[256];
683 #endif
684
685 if (!rctx->ctx.pm4_cdwords)
686 return;
687
688 #if 0
689 sprintf(dname, "gallium-%08d.bof", dc);
690 if (dc < 20) {
691 r600_context_dump_bof(&rctx->ctx, dname);
692 R600_ERR("dumped %s\n", dname);
693 }
694 dc++;
695 #endif
696 r600_context_flush(&rctx->ctx);
697 }
698
699 static void r600_destroy_context(struct pipe_context *context)
700 {
701 struct r600_pipe_context *rctx = (struct r600_pipe_context *)context;
702
703 r600_context_fini(&rctx->ctx);
704 for (int i = 0; i < R600_PIPE_NSTATES; i++) {
705 free(rctx->states[i]);
706 }
707 FREE(rctx);
708 }
709
710 static void r600_blitter_save_states(struct pipe_context *ctx)
711 {
712 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
713
714 util_blitter_save_blend(rctx->blitter, rctx->states[R600_PIPE_STATE_BLEND]);
715 util_blitter_save_depth_stencil_alpha(rctx->blitter, rctx->states[R600_PIPE_STATE_DSA]);
716 if (rctx->states[R600_PIPE_STATE_STENCIL_REF]) {
717 util_blitter_save_stencil_ref(rctx->blitter, &rctx->stencil_ref);
718 }
719 util_blitter_save_rasterizer(rctx->blitter, rctx->states[R600_PIPE_STATE_RASTERIZER]);
720 util_blitter_save_fragment_shader(rctx->blitter, rctx->ps_shader);
721 util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader);
722 util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_elements);
723 if (rctx->states[R600_PIPE_STATE_VIEWPORT]) {
724 util_blitter_save_viewport(rctx->blitter, &rctx->viewport);
725 }
726 if (rctx->states[R600_PIPE_STATE_CLIP]) {
727 util_blitter_save_clip(rctx->blitter, &rctx->clip);
728 }
729 util_blitter_save_vertex_buffers(rctx->blitter, rctx->nvertex_buffer, rctx->vertex_buffer);
730
731 rctx->vertex_elements = NULL;
732
733 /* TODO queries */
734 }
735
736 static void r600_clear(struct pipe_context *ctx, unsigned buffers,
737 const float *rgba, double depth, unsigned stencil)
738 {
739 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
740 struct pipe_framebuffer_state *fb = &rctx->framebuffer;
741
742 r600_blitter_save_states(ctx);
743 util_blitter_clear(rctx->blitter, fb->width, fb->height,
744 fb->nr_cbufs, buffers, rgba, depth,
745 stencil);
746 }
747
748 static void r600_clear_render_target(struct pipe_context *ctx,
749 struct pipe_surface *dst,
750 const float *rgba,
751 unsigned dstx, unsigned dsty,
752 unsigned width, unsigned height)
753 {
754 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
755 struct pipe_framebuffer_state *fb = &rctx->framebuffer;
756
757 util_blitter_save_framebuffer(rctx->blitter, fb);
758 util_blitter_clear_render_target(rctx->blitter, dst, rgba,
759 dstx, dsty, width, height);
760 }
761
762 static void r600_clear_depth_stencil(struct pipe_context *ctx,
763 struct pipe_surface *dst,
764 unsigned clear_flags,
765 double depth,
766 unsigned stencil,
767 unsigned dstx, unsigned dsty,
768 unsigned width, unsigned height)
769 {
770 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
771 struct pipe_framebuffer_state *fb = &rctx->framebuffer;
772
773 util_blitter_save_framebuffer(rctx->blitter, fb);
774 util_blitter_clear_depth_stencil(rctx->blitter, dst, clear_flags, depth, stencil,
775 dstx, dsty, width, height);
776 }
777
778
779 static void r600_resource_copy_region(struct pipe_context *ctx,
780 struct pipe_resource *dst,
781 struct pipe_subresource subdst,
782 unsigned dstx, unsigned dsty, unsigned dstz,
783 struct pipe_resource *src,
784 struct pipe_subresource subsrc,
785 unsigned srcx, unsigned srcy, unsigned srcz,
786 unsigned width, unsigned height)
787 {
788 util_resource_copy_region(ctx, dst, subdst, dstx, dsty, dstz,
789 src, subsrc, srcx, srcy, srcz, width, height);
790 }
791
792 static void r600_init_blit_functions2(struct r600_pipe_context *rctx)
793 {
794 rctx->context.clear = r600_clear;
795 rctx->context.clear_render_target = r600_clear_render_target;
796 rctx->context.clear_depth_stencil = r600_clear_depth_stencil;
797 rctx->context.resource_copy_region = r600_resource_copy_region;
798 }
799
800 static void r600_init_context_resource_functions2(struct r600_pipe_context *r600)
801 {
802 r600->context.get_transfer = u_get_transfer_vtbl;
803 r600->context.transfer_map = u_transfer_map_vtbl;
804 r600->context.transfer_flush_region = u_transfer_flush_region_vtbl;
805 r600->context.transfer_unmap = u_transfer_unmap_vtbl;
806 r600->context.transfer_destroy = u_transfer_destroy_vtbl;
807 r600->context.transfer_inline_write = u_transfer_inline_write_vtbl;
808 r600->context.is_resource_referenced = u_is_resource_referenced_vtbl;
809 }
810
811 static void r600_set_blend_color(struct pipe_context *ctx,
812 const struct pipe_blend_color *state)
813 {
814 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
815 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
816
817 if (rstate == NULL)
818 return;
819
820 rstate->id = R600_PIPE_STATE_BLEND_COLOR;
821 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028414_CB_BLEND_RED, fui(state->color[0]), 0xFFFFFFFF, NULL);
822 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028418_CB_BLEND_GREEN, fui(state->color[1]), 0xFFFFFFFF, NULL);
823 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02841C_CB_BLEND_BLUE, fui(state->color[2]), 0xFFFFFFFF, NULL);
824 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028420_CB_BLEND_ALPHA, fui(state->color[3]), 0xFFFFFFFF, NULL);
825 free(rctx->states[R600_PIPE_STATE_BLEND_COLOR]);
826 rctx->states[R600_PIPE_STATE_BLEND_COLOR] = rstate;
827 r600_context_pipe_state_set(&rctx->ctx, rstate);
828 }
829
830 static void *r600_create_blend_state(struct pipe_context *ctx,
831 const struct pipe_blend_state *state)
832 {
833 struct r600_pipe_blend *blend = CALLOC_STRUCT(r600_pipe_blend);
834 struct r600_pipe_state *rstate;
835 u32 color_control, target_mask;
836
837 if (blend == NULL) {
838 return NULL;
839 }
840 rstate = &blend->rstate;
841
842 rstate->id = R600_PIPE_STATE_BLEND;
843
844 target_mask = 0;
845 color_control = S_028808_PER_MRT_BLEND(1);
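	/* the ROP3 field takes the 4-bit gallium logicop replicated into both nibbles; default to 0xCC (COPY) when logic ops are disabled */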
846 if (state->logicop_enable) {
847 color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
848 } else {
849 color_control |= (0xcc << 16);
850 }
851 /* we pretend all 8 buffers are used, CB_SHADER_MASK will disable the unused ones */
852 if (state->independent_blend_enable) {
853 for (int i = 0; i < 8; i++) {
854 if (state->rt[i].blend_enable) {
855 color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
856 }
857 target_mask |= (state->rt[i].colormask << (4 * i));
858 }
859 } else {
860 for (int i = 0; i < 8; i++) {
861 if (state->rt[0].blend_enable) {
862 color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
863 }
864 target_mask |= (state->rt[0].colormask << (4 * i));
865 }
866 }
867 blend->cb_target_mask = target_mask;
868 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028808_CB_COLOR_CONTROL,
869 color_control, 0xFFFFFFFF, NULL);
870
871 for (int i = 0; i < 8; i++) {
872 unsigned eqRGB = state->rt[i].rgb_func;
873 unsigned srcRGB = state->rt[i].rgb_src_factor;
874 unsigned dstRGB = state->rt[i].rgb_dst_factor;
875
876 unsigned eqA = state->rt[i].alpha_func;
877 unsigned srcA = state->rt[i].alpha_src_factor;
878 unsigned dstA = state->rt[i].alpha_dst_factor;
879 uint32_t bc = 0;
880
881 if (!state->rt[i].blend_enable)
882 continue;
883
884 bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
885 bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
886 bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));
887
888 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
889 bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
890 bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
891 bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
892 bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
893 }
894
895 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028780_CB_BLEND0_CONTROL + i * 4, bc, 0xFFFFFFFF, NULL);
896 if (i == 0) {
897 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028804_CB_BLEND_CONTROL, bc, 0xFFFFFFFF, NULL);
898 }
899 }
900 return rstate;
901 }
902
903 static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
904 {
905 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
906 struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
907 struct r600_pipe_state *rstate;
908
909 if (state == NULL)
910 return;
911 rstate = &blend->rstate;
912 rctx->states[rstate->id] = rstate;
913 rctx->cb_target_mask = blend->cb_target_mask;
914 r600_context_pipe_state_set(&rctx->ctx, rstate);
915 }
916
917 static void *r600_create_dsa_state(struct pipe_context *ctx,
918 const struct pipe_depth_stencil_alpha_state *state)
919 {
920 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
921 unsigned db_depth_control, alpha_test_control, alpha_ref, db_shader_control;
922 unsigned stencil_ref_mask, stencil_ref_mask_bf, db_render_override, db_render_control;
923
924 if (rstate == NULL) {
925 return NULL;
926 }
927
928 rstate->id = R600_PIPE_STATE_DSA;
929 /* depth TODO: some of these db_shader_control fields depend on the shader; adjust the mask & add them to the shader state */
930 /* the db_shader_control mask is 0xFFFFFFBE because Z_EXPORT_ENABLE (bit 0) will be
931  * set by the fragment shader if it exports Z, and KILL_ENABLE (bit 6) will
932  * be set if the shader uses the texkill instruction
933  */
934 db_shader_control = 0x210;
935 stencil_ref_mask = 0;
936 stencil_ref_mask_bf = 0;
937 db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
938 S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
939 S_028800_ZFUNC(state->depth.func);
940
941 /* stencil */
942 if (state->stencil[0].enabled) {
943 db_depth_control |= S_028800_STENCIL_ENABLE(1);
944 db_depth_control |= S_028800_STENCILFUNC(r600_translate_ds_func(state->stencil[0].func));
945 db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
946 db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
947 db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));
948
949
950 stencil_ref_mask = S_028430_STENCILMASK(state->stencil[0].valuemask) |
951 S_028430_STENCILWRITEMASK(state->stencil[0].writemask);
952 if (state->stencil[1].enabled) {
953 db_depth_control |= S_028800_BACKFACE_ENABLE(1);
954 db_depth_control |= S_028800_STENCILFUNC_BF(r600_translate_ds_func(state->stencil[1].func));
955 db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
956 db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
957 db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
958 stencil_ref_mask_bf = S_028434_STENCILMASK_BF(state->stencil[1].valuemask) |
959 S_028434_STENCILWRITEMASK_BF(state->stencil[1].writemask);
960 }
961 }
962
963 /* alpha */
964 alpha_test_control = 0;
965 alpha_ref = 0;
966 if (state->alpha.enabled) {
967 alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
968 alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
969 alpha_ref = fui(state->alpha.ref_value);
970 }
971
972 /* misc */
973 db_render_control = 0;
974 db_render_override = S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE) |
975 S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
976 S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);
977 /* TODO db_render_override depends on query */
978 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028028_DB_STENCIL_CLEAR, 0x00000000, 0xFFFFFFFF, NULL);
979 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02802C_DB_DEPTH_CLEAR, 0x3F800000, 0xFFFFFFFF, NULL);
980 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028410_SX_ALPHA_TEST_CONTROL, alpha_test_control, 0xFFFFFFFF, NULL);
981 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
982 R_028430_DB_STENCILREFMASK, stencil_ref_mask,
983 0xFFFFFFFF & C_028430_STENCILREF, NULL);
984 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
985 R_028434_DB_STENCILREFMASK_BF, stencil_ref_mask_bf,
986 0xFFFFFFFF & C_028434_STENCILREF_BF, NULL);
987 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL);
988 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286E0_SPI_FOG_FUNC_SCALE, 0x00000000, 0xFFFFFFFF, NULL);
989 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286E4_SPI_FOG_FUNC_BIAS, 0x00000000, 0xFFFFFFFF, NULL);
990 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286DC_SPI_FOG_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
991 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028800_DB_DEPTH_CONTROL, db_depth_control, 0xFFFFFFFF, NULL);
992 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02880C_DB_SHADER_CONTROL, db_shader_control, 0xFFFFFFBE, NULL);
993 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D0C_DB_RENDER_CONTROL, db_render_control, 0xFFFFFFFF, NULL);
994 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D10_DB_RENDER_OVERRIDE, db_render_override, 0xFFFFFFFF, NULL);
995 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D2C_DB_SRESULTS_COMPARE_STATE1, 0x00000000, 0xFFFFFFFF, NULL);
996 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D30_DB_PRELOAD_CONTROL, 0x00000000, 0xFFFFFFFF, NULL);
997 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D44_DB_ALPHA_TO_MASK, 0x0000AA00, 0xFFFFFFFF, NULL);
998
999 return rstate;
1000 }
1001
1002 static void *r600_create_rs_state(struct pipe_context *ctx,
1003 const struct pipe_rasterizer_state *state)
1004 {
1005 struct r600_pipe_rasterizer *rs = CALLOC_STRUCT(r600_pipe_rasterizer);
1006 struct r600_pipe_state *rstate;
1007 float offset_units = 0, offset_scale = 0;
1008 unsigned offset_db_fmt_cntl = 0;
1009 unsigned tmp;
1010 unsigned prov_vtx = 1;
1011
1012 if (rs == NULL) {
1013 return NULL;
1014 }
1015
1016 rstate = &rs->rstate;
1017 rs->flatshade = state->flatshade;
1018 rs->sprite_coord_enable = state->sprite_coord_enable;
1019
1020 rstate->id = R600_PIPE_STATE_RASTERIZER;
1021 if (state->flatshade_first)
1022 prov_vtx = 0;
1023 tmp = 0x00000001;
1024 if (state->sprite_coord_enable) {
1025 tmp |= S_0286D4_PNT_SPRITE_ENA(1) |
1026 S_0286D4_PNT_SPRITE_OVRD_X(2) |
1027 S_0286D4_PNT_SPRITE_OVRD_Y(3) |
1028 S_0286D4_PNT_SPRITE_OVRD_Z(0) |
1029 S_0286D4_PNT_SPRITE_OVRD_W(1);
1030 if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
1031 tmp |= S_0286D4_PNT_SPRITE_TOP_1(1);
1032 }
1033 }
1034 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286D4_SPI_INTERP_CONTROL_0, tmp, 0xFFFFFFFF, NULL);
1035
1036 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028814_PA_SU_SC_MODE_CNTL,
1037 S_028814_PROVOKING_VTX_LAST(prov_vtx) |
1038 S_028814_CULL_FRONT((state->cull_face & PIPE_FACE_FRONT) ? 1 : 0) |
1039 S_028814_CULL_BACK((state->cull_face & PIPE_FACE_BACK) ? 1 : 0) |
1040 S_028814_FACE(!state->front_ccw) |
1041 S_028814_POLY_OFFSET_FRONT_ENABLE(state->offset_tri) |
1042 S_028814_POLY_OFFSET_BACK_ENABLE(state->offset_tri) |
1043 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_tri), 0xFFFFFFFF, NULL);
1044 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02881C_PA_CL_VS_OUT_CNTL,
1045 S_02881C_USE_VTX_POINT_SIZE(state->point_size_per_vertex) |
1046 S_02881C_VS_OUT_MISC_VEC_ENA(state->point_size_per_vertex), 0xFFFFFFFF, NULL);
1047 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028820_PA_CL_NANINF_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
1048 /* point size in 12.4 fixed point; the register takes the half size, hence * 8 (= 16 / 2) */
1049 tmp = (unsigned)(state->point_size * 8.0);
1050 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A00_PA_SU_POINT_SIZE, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp), 0xFFFFFFFF, NULL);
1051 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A04_PA_SU_POINT_MINMAX, 0x80000000, 0xFFFFFFFF, NULL);
1052 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A08_PA_SU_LINE_CNTL, 0x00000008, 0xFFFFFFFF, NULL);
1053 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A0C_PA_SC_LINE_STIPPLE, 0x00000005, 0xFFFFFFFF, NULL);
1054 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A48_PA_SC_MPASS_PS_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
1055 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C00_PA_SC_LINE_CNTL, 0x00000400, 0xFFFFFFFF, NULL);
1056 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C0C_PA_CL_GB_VERT_CLIP_ADJ, 0x3F800000, 0xFFFFFFFF, NULL);
1057 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C10_PA_CL_GB_VERT_DISC_ADJ, 0x3F800000, 0xFFFFFFFF, NULL);
1058 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C14_PA_CL_GB_HORZ_CLIP_ADJ, 0x3F800000, 0xFFFFFFFF, NULL);
1059 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C18_PA_CL_GB_HORZ_DISC_ADJ, 0x3F800000, 0xFFFFFFFF, NULL);
1060 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, offset_db_fmt_cntl, 0xFFFFFFFF, NULL);
1061 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0x00000000, 0xFFFFFFFF, NULL);
1062 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, fui(offset_scale), 0xFFFFFFFF, NULL);
1063 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, fui(offset_units), 0xFFFFFFFF, NULL);
1064 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, fui(offset_scale), 0xFFFFFFFF, NULL);
1065 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, fui(offset_units), 0xFFFFFFFF, NULL);
1066 return rstate;
1067 }
1068
1069 static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
1070 {
1071 struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
1072 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1073
1074 if (state == NULL)
1075 return;
1076
1077 if (rctx->flatshade != rs->flatshade) {
1078 rctx->ps_rebuild = TRUE;
1079 }
1080 if (rctx->sprite_coord_enable != rs->sprite_coord_enable) {
1081 rctx->ps_rebuild = TRUE;
1082 }
1083 rctx->flatshade = rs->flatshade;
1084 rctx->sprite_coord_enable = rs->sprite_coord_enable;
1085
1086 rctx->states[rs->rstate.id] = &rs->rstate;
1087 r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);
1088 }
1089
1090 static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
1091 {
1092 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1093 struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
1094
1095 if (rctx->states[rs->rstate.id] == &rs->rstate) {
1096 rctx->states[rs->rstate.id] = NULL;
1097 }
1098 free(rs);
1099 }
1100
1101 static void *r600_create_sampler_state(struct pipe_context *ctx,
1102 const struct pipe_sampler_state *state)
1103 {
1104 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1105 union util_color uc;
1106
1107 if (rstate == NULL) {
1108 return NULL;
1109 }
1110
1111 rstate->id = R600_PIPE_STATE_SAMPLER;
1112 util_pack_color(state->border_color, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
1113 r600_pipe_state_add_reg(rstate, R600_GROUP_SAMPLER, R_03C000_SQ_TEX_SAMPLER_WORD0_0,
1114 S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
1115 S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
1116 S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
1117 S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter)) |
1118 S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter)) |
1119 S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
1120 S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
1121 S_03C000_BORDER_COLOR_TYPE(uc.ui ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0), 0xFFFFFFFF, NULL);
1122 /* FIXME: LOD depends on the texture base level ... */
1123 r600_pipe_state_add_reg(rstate, R600_GROUP_SAMPLER, R_03C004_SQ_TEX_SAMPLER_WORD1_0,
1124 S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) |
1125 S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) |
1126 S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6)), 0xFFFFFFFF, NULL);
1127 r600_pipe_state_add_reg(rstate, R600_GROUP_SAMPLER, R_03C008_SQ_TEX_SAMPLER_WORD2_0, S_03C008_TYPE(1), 0xFFFFFFFF, NULL);
1128 if (uc.ui) {
1129 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_00A400_TD_PS_SAMPLER0_BORDER_RED, fui(state->border_color[0]), 0xFFFFFFFF, NULL);
1130 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_00A404_TD_PS_SAMPLER0_BORDER_GREEN, fui(state->border_color[1]), 0xFFFFFFFF, NULL);
1131 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_00A408_TD_PS_SAMPLER0_BORDER_BLUE, fui(state->border_color[2]), 0xFFFFFFFF, NULL);
1132 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_00A40C_TD_PS_SAMPLER0_BORDER_ALPHA, fui(state->border_color[3]), 0xFFFFFFFF, NULL);
1133 }
1134 return rstate;
1135 }
1136
1137 static void *r600_create_vertex_elements(struct pipe_context *ctx,
1138 unsigned count,
1139 const struct pipe_vertex_element *elements)
1140 {
1141 struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);
1142
1143 assert(count < 32);
1144 v->count = count;
1145 v->refcount = 1;
1146 memcpy(v->elements, elements, count * sizeof(struct pipe_vertex_element));
1147 return v;
1148 }
1149
1150 static void r600_sampler_view_destroy(struct pipe_context *ctx,
1151 struct pipe_sampler_view *state)
1152 {
1153 struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;
1154
1155 pipe_resource_reference(&state->texture, NULL);
1156 FREE(resource);
1157 }
1158
1159 static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *ctx,
1160 struct pipe_resource *texture,
1161 const struct pipe_sampler_view *state)
1162 {
1163 struct r600_pipe_sampler_view *resource = CALLOC_STRUCT(r600_pipe_sampler_view);
1164 struct r600_pipe_state *rstate;
1165 const struct util_format_description *desc;
1166 struct r600_resource_texture *tmp;
1167 struct r600_resource *rbuffer;
1168 unsigned format;
1169 uint32_t word4 = 0, yuv_format = 0, pitch = 0;
1170 unsigned char swizzle[4], array_mode = 0, tile_type = 0;
1171 struct radeon_ws_bo *bo[2];
1172
1173 if (resource == NULL)
1174 return NULL;
1175 rstate = &resource->state;
1176
1177 /* initialize base object */
1178 resource->base = *state;
1179 resource->base.texture = NULL;
1180 pipe_reference(NULL, &texture->reference);
1181 resource->base.texture = texture;
1182 resource->base.reference.count = 1;
1183 resource->base.context = ctx;
1184
1185 swizzle[0] = state->swizzle_r;
1186 swizzle[1] = state->swizzle_g;
1187 swizzle[2] = state->swizzle_b;
1188 swizzle[3] = state->swizzle_a;
1189 format = r600_translate_texformat(texture->format,
1190 swizzle,
1191 &word4, &yuv_format);
1192 if (format == ~0) {
1193 format = 0;
1194 }
1195 desc = util_format_description(texture->format);
1196 if (desc == NULL) {
1197 R600_ERR("unknow format %d\n", texture->format);
1198 }
1199 tmp = (struct r600_resource_texture*)texture;
1200 rbuffer = &tmp->resource;
1201 bo[0] = rbuffer->bo;
1202 bo[1] = rbuffer->bo;
1203 /* FIXME depth texture decompression */
1204 if (tmp->depth) {
1205 #if 0
1206 r = r600_texture_from_depth(ctx, tmp, view->first_level);
1207 if (r) {
1208 return;
1209 }
1210 bo[0] = radeon_ws_bo_incref(rscreen->rw, tmp->uncompressed);
1211 bo[1] = radeon_ws_bo_incref(rscreen->rw, tmp->uncompressed);
1212 #endif
1213 }
1214 pitch = align(tmp->pitch[0] / tmp->bpt, 8);
1215
1216 /* FIXME properly handle first level != 0 */
1217 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038000_RESOURCE0_WORD0,
1218 S_038000_DIM(r600_tex_dim(texture->target)) |
1219 S_038000_TILE_MODE(array_mode) |
1220 S_038000_TILE_TYPE(tile_type) |
1221 S_038000_PITCH((pitch / 8) - 1) |
1222 S_038000_TEX_WIDTH(texture->width0 - 1), 0xFFFFFFFF, NULL);
1223 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038004_RESOURCE0_WORD1,
1224 S_038004_TEX_HEIGHT(texture->height0 - 1) |
1225 S_038004_TEX_DEPTH(texture->depth0 - 1) |
1226 S_038004_DATA_FORMAT(format), 0xFFFFFFFF, NULL);
1227 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038008_RESOURCE0_WORD2,
1228 tmp->offset[0] >> 8, 0xFFFFFFFF, bo[0]);
1229 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_03800C_RESOURCE0_WORD3,
1230 tmp->offset[1] >> 8, 0xFFFFFFFF, bo[1]);
1231 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038010_RESOURCE0_WORD4,
1232 word4 | S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_NORM) |
1233 S_038010_SRF_MODE_ALL(V_038010_SFR_MODE_NO_ZERO) |
1234 S_038010_REQUEST_SIZE(1) |
1235 S_038010_BASE_LEVEL(state->first_level), 0xFFFFFFFF, NULL);
1236 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038014_RESOURCE0_WORD5,
1237 S_038014_LAST_LEVEL(state->last_level) |
1238 S_038014_BASE_ARRAY(0) |
1239 S_038014_LAST_ARRAY(0), 0xFFFFFFFF, NULL);
1240 r600_pipe_state_add_reg(rstate, R600_GROUP_RESOURCE, R_038018_RESOURCE0_WORD6,
1241 S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE), 0xFFFFFFFF, NULL);
1242
1243 return &resource->base;
1244 }
1245
1246 static void r600_set_vs_sampler_view(struct pipe_context *ctx, unsigned count,
1247 struct pipe_sampler_view **views)
1248 {
1249 /* TODO */
1250 assert(1);
1251 }
1252
1253 static void r600_set_ps_sampler_view(struct pipe_context *ctx, unsigned count,
1254 struct pipe_sampler_view **views)
1255 {
1256 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1257 struct r600_pipe_sampler_view **resource = (struct r600_pipe_sampler_view **)views;
1258
1259 for (int i = 0; i < count; i++) {
1260 if (resource[i]) {
1261 r600_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, i);
1262 }
1263 }
1264 }
1265
1266 static void r600_bind_state(struct pipe_context *ctx, void *state)
1267 {
1268 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1269 struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
1270
1271 if (state == NULL)
1272 return;
1273 rctx->states[rstate->id] = rstate;
1274 r600_context_pipe_state_set(&rctx->ctx, rstate);
1275 }
1276
1277 static void r600_bind_ps_sampler(struct pipe_context *ctx, unsigned count, void **states)
1278 {
1279 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1280 struct r600_pipe_state **rstates = (struct r600_pipe_state **)states;
1281
1282 for (int i = 0; i < count; i++) {
1283 r600_context_pipe_state_set_ps_sampler(&rctx->ctx, rstates[i], i);
1284 }
1285 }
1286
1287 static void r600_bind_vs_sampler(struct pipe_context *ctx, unsigned count, void **states)
1288 {
1289 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1290 struct r600_pipe_state **rstates = (struct r600_pipe_state **)states;
1291
1292 /* TODO implement */
1293 for (int i = 0; i < count; i++) {
1294 r600_context_pipe_state_set_vs_sampler(&rctx->ctx, rstates[i], i);
1295 }
1296 }
1297
1298 static void r600_delete_state(struct pipe_context *ctx, void *state)
1299 {
1300 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1301 struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
1302
1303 if (rctx->states[rstate->id] == rstate) {
1304 rctx->states[rstate->id] = NULL;
1305 }
1306 for (int i = 0; i < rstate->nregs; i++) {
1307 radeon_ws_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
1308 }
1309 free(rstate);
1310 }
1311
1312 static void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
1313 {
1314 struct r600_vertex_element *v = (struct r600_vertex_element*)state;
1315
1316 if (v == NULL)
1317 return;
1318 if (--v->refcount)
1319 return;
1320 free(v);
1321 }
1322
1323 static void r600_set_clip_state(struct pipe_context *ctx,
1324 const struct pipe_clip_state *state)
1325 {
1326 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1327 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1328
1329 if (rstate == NULL)
1330 return;
1331
1332 rctx->clip = *state;
1333 rstate->id = R600_PIPE_STATE_CLIP;
1334 for (int i = 0; i < state->nr; i++) {
1335 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1336 R_028E20_PA_CL_UCP0_X + i * 16,
1337 fui(state->ucp[i][0]), 0xFFFFFFFF, NULL);
1338 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1339 R_028E24_PA_CL_UCP0_Y + i * 16,
1340 fui(state->ucp[i][1]), 0xFFFFFFFF, NULL);
1341 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1342 R_028E28_PA_CL_UCP0_Z + i * 16,
1343 fui(state->ucp[i][2]), 0xFFFFFFFF, NULL);
1344 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1345 R_028E2C_PA_CL_UCP0_W + i * 16,
1346 fui(state->ucp[i][3]), 0xFFFFFFFF, NULL);
1347 }
1348 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028810_PA_CL_CLIP_CNTL,
1349 S_028810_PS_UCP_MODE(3) | ((1 << state->nr) - 1) |
1350 S_028810_ZCLIP_NEAR_DISABLE(state->depth_clamp) |
1351 S_028810_ZCLIP_FAR_DISABLE(state->depth_clamp), 0xFFFFFFFF, NULL);
1352
1353 free(rctx->states[R600_PIPE_STATE_CLIP]);
1354 rctx->states[R600_PIPE_STATE_CLIP] = rstate;
1355 r600_context_pipe_state_set(&rctx->ctx, rstate);
1356 }
1357
1358 static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
1359 {
1360 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1361 struct r600_vertex_element *v = (struct r600_vertex_element*)state;
1362
1363 r600_delete_vertex_element(ctx, rctx->vertex_elements);
1364 rctx->vertex_elements = v;
1365 if (v) {
1366 v->refcount++;
1367 rctx->vs_rebuild = TRUE;
1368 }
1369 }
1370
1371 static void r600_set_polygon_stipple(struct pipe_context *ctx,
1372 const struct pipe_poly_stipple *state)
1373 {
1374 }
1375
1376 static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
1377 {
1378 }
1379
1380 static void r600_set_scissor_state(struct pipe_context *ctx,
1381 const struct pipe_scissor_state *state)
1382 {
1383 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1384 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1385 u32 tl, br;
1386
1387 if (rstate == NULL)
1388 return;
1389
1390 rstate->id = R600_PIPE_STATE_SCISSOR;
1391 tl = S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) | S_028240_WINDOW_OFFSET_DISABLE(1);
1392 br = S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy);
1393 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1394 R_028030_PA_SC_SCREEN_SCISSOR_TL, tl,
1395 0xFFFFFFFF, NULL);
1396 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1397 R_028034_PA_SC_SCREEN_SCISSOR_BR, br,
1398 0xFFFFFFFF, NULL);
1399 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1400 R_028204_PA_SC_WINDOW_SCISSOR_TL, tl,
1401 0xFFFFFFFF, NULL);
1402 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1403 R_028208_PA_SC_WINDOW_SCISSOR_BR, br,
1404 0xFFFFFFFF, NULL);
1405 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1406 R_028210_PA_SC_CLIPRECT_0_TL, tl,
1407 0xFFFFFFFF, NULL);
1408 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1409 R_028214_PA_SC_CLIPRECT_0_BR, br,
1410 0xFFFFFFFF, NULL);
1411 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1412 R_028218_PA_SC_CLIPRECT_1_TL, tl,
1413 0xFFFFFFFF, NULL);
1414 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1415 R_02821C_PA_SC_CLIPRECT_1_BR, br,
1416 0xFFFFFFFF, NULL);
1417 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1418 R_028220_PA_SC_CLIPRECT_2_TL, tl,
1419 0xFFFFFFFF, NULL);
1420 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1421 R_028224_PA_SC_CLIPRECT_2_BR, br,
1422 0xFFFFFFFF, NULL);
1423 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1424 R_028228_PA_SC_CLIPRECT_3_TL, tl,
1425 0xFFFFFFFF, NULL);
1426 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1427 R_02822C_PA_SC_CLIPRECT_3_BR, br,
1428 0xFFFFFFFF, NULL);
1429 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1430 R_028200_PA_SC_WINDOW_OFFSET, 0x00000000,
1431 0xFFFFFFFF, NULL);
1432 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1433 R_02820C_PA_SC_CLIPRECT_RULE, 0x0000FFFF,
1434 0xFFFFFFFF, NULL);
1435 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1436 R_028230_PA_SC_EDGERULE, 0xAAAAAAAA,
1437 0xFFFFFFFF, NULL);
1438
1439 free(rctx->states[R600_PIPE_STATE_SCISSOR]);
1440 rctx->states[R600_PIPE_STATE_SCISSOR] = rstate;
1441 r600_context_pipe_state_set(&rctx->ctx, rstate);
1442 }
1443
1444 static void r600_set_stencil_ref(struct pipe_context *ctx,
1445 const struct pipe_stencil_ref *state)
1446 {
1447 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1448 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1449 u32 tmp;
1450
1451 if (rstate == NULL)
1452 return;
1453
1454 rctx->stencil_ref = *state;
1455 rstate->id = R600_PIPE_STATE_STENCIL_REF;
1456 tmp = S_028430_STENCILREF(state->ref_value[0]);
1457 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1458 R_028430_DB_STENCILREFMASK, tmp,
1459 ~C_028430_STENCILREF, NULL);
1460 tmp = S_028434_STENCILREF_BF(state->ref_value[1]);
1461 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1462 R_028434_DB_STENCILREFMASK_BF, tmp,
1463 ~C_028434_STENCILREF_BF, NULL);
1464
1465 free(rctx->states[R600_PIPE_STATE_STENCIL_REF]);
1466 rctx->states[R600_PIPE_STATE_STENCIL_REF] = rstate;
1467 r600_context_pipe_state_set(&rctx->ctx, rstate);
1468 }
1469
1470 static void r600_set_viewport_state(struct pipe_context *ctx,
1471 const struct pipe_viewport_state *state)
1472 {
1473 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1474 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1475
1476 if (rstate == NULL)
1477 return;
1478
1479 rctx->viewport = *state;
1480 rstate->id = R600_PIPE_STATE_VIEWPORT;
1481 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0282D0_PA_SC_VPORT_ZMIN_0, 0x00000000, 0xFFFFFFFF, NULL);
1482 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0282D4_PA_SC_VPORT_ZMAX_0, 0x3F800000, 0xFFFFFFFF, NULL);
1483 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02843C_PA_CL_VPORT_XSCALE_0, fui(state->scale[0]), 0xFFFFFFFF, NULL);
1484 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028444_PA_CL_VPORT_YSCALE_0, fui(state->scale[1]), 0xFFFFFFFF, NULL);
1485 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02844C_PA_CL_VPORT_ZSCALE_0, fui(state->scale[2]), 0xFFFFFFFF, NULL);
1486 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028440_PA_CL_VPORT_XOFFSET_0, fui(state->translate[0]), 0xFFFFFFFF, NULL);
1487 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028448_PA_CL_VPORT_YOFFSET_0, fui(state->translate[1]), 0xFFFFFFFF, NULL);
1488 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028450_PA_CL_VPORT_ZOFFSET_0, fui(state->translate[2]), 0xFFFFFFFF, NULL);
1489 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028818_PA_CL_VTE_CNTL, 0x0000043F, 0xFFFFFFFF, NULL);
1490
1491 free(rctx->states[R600_PIPE_STATE_VIEWPORT]);
1492 rctx->states[R600_PIPE_STATE_VIEWPORT] = rstate;
1493 r600_context_pipe_state_set(&rctx->ctx, rstate);
1494 }
1495
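/* Emit the CB_COLOR0_* register block for color buffer 'cb': base address,
 * pitch/slice extents, format/swap/number type, plus the VIEW/FRAG/TILE/MASK
 * registers.  Each register in the block is laid out one dword per target,
 * hence the '+ cb * 4'; FRAG and TILE reuse the same BO as the color base
 * here. */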
1496 static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate,
1497 const struct pipe_framebuffer_state *state, int cb)
1498 {
1499 struct r600_resource_texture *rtex;
1500 struct r600_resource *rbuffer;
1501 unsigned level = state->cbufs[cb]->level;
1502 unsigned pitch, slice;
1503 unsigned color_info;
1504 unsigned format, swap, ntype;
1505 const struct util_format_description *desc;
1506 struct radeon_ws_bo *bo[3];
1507
1508 rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture;
1509 rbuffer = &rtex->resource;
1510 bo[0] = rbuffer->bo;
1511 bo[1] = rbuffer->bo;
1512 bo[2] = rbuffer->bo;
1513
1514 pitch = (rtex->pitch[level] / rtex->bpt) / 8 - 1;
1515 slice = (rtex->pitch[level] / rtex->bpt) * state->cbufs[cb]->height / 64 - 1;
1516 ntype = 0;
1517 desc = util_format_description(rtex->resource.base.b.format);
1518 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
1519 ntype = V_0280A0_NUMBER_SRGB;
1520
1521 format = r600_translate_colorformat(rtex->resource.base.b.format);
1522 swap = r600_translate_colorswap(rtex->resource.base.b.format);
1523 color_info = S_0280A0_FORMAT(format) |
1524 S_0280A0_COMP_SWAP(swap) |
1525 S_0280A0_BLEND_CLAMP(1) |
1526 S_0280A0_SOURCE_FORMAT(1) |
1527 S_0280A0_NUMBER_TYPE(ntype);
1528
1529 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1530 R_028040_CB_COLOR0_BASE + cb * 4,
1531 state->cbufs[cb]->offset >> 8, 0xFFFFFFFF, bo[0]);
1532 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1533 R_0280A0_CB_COLOR0_INFO + cb * 4,
1534 color_info, 0xFFFFFFFF, NULL);
1535 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1536 R_028060_CB_COLOR0_SIZE + cb * 4,
1537 S_028060_PITCH_TILE_MAX(pitch) |
1538 S_028060_SLICE_TILE_MAX(slice),
1539 0xFFFFFFFF, NULL);
1540 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1541 R_028080_CB_COLOR0_VIEW + cb * 4,
1542 0x00000000, 0xFFFFFFFF, NULL);
1543 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1544 R_0280E0_CB_COLOR0_FRAG + cb * 4,
1545 0x00000000, 0xFFFFFFFF, bo[1]);
1546 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1547 R_0280C0_CB_COLOR0_TILE + cb * 4,
1548 0x00000000, 0xFFFFFFFF, bo[2]);
1549 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1550 R_028100_CB_COLOR0_MASK + cb * 4,
1551 0x00000000, 0xFFFFFFFF, NULL);
1552 }
1553
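/* Emit the depth/stencil buffer registers.  Note that this also marks the
 * texture as tiled (array_mode 2) and as a depth resource as a side effect,
 * which the texture/transfer code presumably depends on. */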
1554 static void r600_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate,
1555 const struct pipe_framebuffer_state *state)
1556 {
1557 struct r600_resource_texture *rtex;
1558 struct r600_resource *rbuffer;
1559 unsigned level;
1560 unsigned pitch, slice, format;
1561
1562 if (state->zsbuf == NULL)
1563 return;
1564
1565 rtex = (struct r600_resource_texture*)state->zsbuf->texture;
1566 rtex->tilled = 1;
1567 rtex->array_mode = 2;
1568 rtex->tile_type = 1;
1569 rtex->depth = 1;
1570 rbuffer = &rtex->resource;
1571
1572 level = state->zsbuf->level;
1573 pitch = (rtex->pitch[level] / rtex->bpt) / 8 - 1;
1574 slice = (rtex->pitch[level] / rtex->bpt) * state->zsbuf->height / 64 - 1;
1575 format = r600_translate_dbformat(state->zsbuf->texture->format);
1576
1577 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02800C_DB_DEPTH_BASE,
1578 state->zsbuf->offset >> 8, 0xFFFFFFFF, rbuffer->bo);
1579 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028000_DB_DEPTH_SIZE,
1580 S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice),
1581 0xFFFFFFFF, NULL);
1582 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028004_DB_DEPTH_VIEW, 0x00000000, 0xFFFFFFFF, NULL);
1583 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028010_DB_DEPTH_INFO,
1584 S_028010_ARRAY_MODE(rtex->array_mode) | S_028010_FORMAT(format),
1585 0xFFFFFFFF, NULL);
1586 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028D34_DB_PREFETCH_LIMIT,
1587 (state->zsbuf->height / 8) - 1, 0xFFFFFFFF, NULL);
1588 }
1589
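/* Rebuild the framebuffer state: re-reference the bound surfaces, emit the
 * CB/DB register blocks for every attached buffer, and derive
 * CB_TARGET_MASK / CB_SHADER_MASK / CB_SHADER_CONTROL from the number of
 * color buffers so that unused MRT slots stay masked off. */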
1590 static void r600_set_framebuffer_state(struct pipe_context *ctx,
1591 const struct pipe_framebuffer_state *state)
1592 {
1593 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1594 struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);
1595 u32 shader_mask, tl, br, shader_control, target_mask;
1596
1597 if (rstate == NULL)
1598 return;
1599
1600 /* unreference old buffer and reference new one */
1601 rstate->id = R600_PIPE_STATE_FRAMEBUFFER;
1602 for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
1603 pipe_surface_reference(&rctx->framebuffer.cbufs[i], NULL);
1604 }
1605 for (int i = 0; i < state->nr_cbufs; i++) {
1606 pipe_surface_reference(&rctx->framebuffer.cbufs[i], state->cbufs[i]);
1607 }
1608 pipe_surface_reference(&rctx->framebuffer.zsbuf, state->zsbuf);
1609 rctx->framebuffer = *state;
1610
1611 /* build states */
1612 for (int i = 0; i < state->nr_cbufs; i++) {
1613 r600_cb(rctx, rstate, state, i);
1614 }
1615 if (state->zsbuf) {
1616 r600_db(rctx, rstate, state);
1617 }
1618
1619 	/* start with every MRT slot masked off, then unmask the bound targets below */
1620 	target_mask = 0xFFFFFFFF;
1621 shader_mask = 0;
1622 shader_control = 0;
1623 for (int i = 0; i < state->nr_cbufs; i++) {
1624 target_mask ^= 0xf << (i * 4);
1625 shader_mask |= 0xf << (i * 4);
1626 shader_control |= 1 << i;
1627 }
1628 tl = S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1);
1629 br = S_028244_BR_X(state->width) | S_028244_BR_Y(state->height);
1630
1631 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1632 R_028240_PA_SC_GENERIC_SCISSOR_TL, tl,
1633 0xFFFFFFFF, NULL);
1634 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1635 R_028244_PA_SC_GENERIC_SCISSOR_BR, br,
1636 0xFFFFFFFF, NULL);
1637 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1638 R_028250_PA_SC_VPORT_SCISSOR_0_TL, tl,
1639 0xFFFFFFFF, NULL);
1640 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT,
1641 R_028254_PA_SC_VPORT_SCISSOR_0_BR, br,
1642 0xFFFFFFFF, NULL);
1643
1644 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0287A0_CB_SHADER_CONTROL,
1645 shader_control, 0xFFFFFFFF, NULL);
1646 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028238_CB_TARGET_MASK,
1647 0x00000000, target_mask, NULL);
1648 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02823C_CB_SHADER_MASK,
1649 shader_mask, 0xFFFFFFFF, NULL);
1650 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C04_PA_SC_AA_CONFIG,
1651 0x00000000, 0xFFFFFFFF, NULL);
1652 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX,
1653 0x00000000, 0xFFFFFFFF, NULL);
1654 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C20_PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX,
1655 0x00000000, 0xFFFFFFFF, NULL);
1656 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C30_CB_CLRCMP_CONTROL,
1657 0x01000000, 0xFFFFFFFF, NULL);
1658 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C34_CB_CLRCMP_SRC,
1659 0x00000000, 0xFFFFFFFF, NULL);
1660 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C38_CB_CLRCMP_DST,
1661 0x000000FF, 0xFFFFFFFF, NULL);
1662 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C3C_CB_CLRCMP_MSK,
1663 0xFFFFFFFF, 0xFFFFFFFF, NULL);
1664 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028C48_PA_SC_AA_MASK,
1665 0xFFFFFFFF, 0xFFFFFFFF, NULL);
1666
1667 free(rctx->states[R600_PIPE_STATE_FRAMEBUFFER]);
1668 rctx->states[R600_PIPE_STATE_FRAMEBUFFER] = rstate;
1669 r600_context_pipe_state_set(&rctx->ctx, rstate);
1670 }
1671
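/* Stash the index buffer for the next draw; it is consumed directly by the
 * draw path rather than being tracked as an r600_pipe_state (see the TODO
 * below). */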
1672 static void r600_set_index_buffer(struct pipe_context *ctx,
1673 const struct pipe_index_buffer *ib)
1674 {
1675 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1676
1677 if (ib) {
1678 pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
1679 memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer));
1680 } else {
1681 pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
1682 memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer));
1683 }
1684
1685 /* TODO make this more like a state */
1686 }
1687
1688 static void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
1689 const struct pipe_vertex_buffer *buffers)
1690 {
1691 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1692
1693 for (int i = 0; i < rctx->nvertex_buffer; i++) {
1694 pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL);
1695 }
1696 memcpy(rctx->vertex_buffer, buffers, sizeof(struct pipe_vertex_buffer) * count);
1697 for (int i = 0; i < count; i++) {
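		/* the memcpy above copied raw pointers without taking references;
		 * clear the slot first so pipe_resource_reference() does not
		 * unreference a buffer this context never owned */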
1698 rctx->vertex_buffer[i].buffer = NULL;
1699 pipe_resource_reference(&rctx->vertex_buffer[i].buffer, buffers[i].buffer);
1700 }
1701 rctx->nvertex_buffer = count;
1702 }
1703
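/* Upload user constants as raw SQ_ALU_CONSTANT0 register writes, one
 * r600_pipe_state per vec4.  Vertex shader constants live at a +0x1000
 * offset from the fragment shader constant file. */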
1704 static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
1705 struct pipe_resource *buffer)
1706 {
1707 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1708 struct r600_pipe_state *rstate;
1709 struct pipe_transfer *transfer;
1710 unsigned *nconst = NULL;
1711 u32 *ptr, offset;
1712
1713 switch (shader) {
1714 case PIPE_SHADER_VERTEX:
1715 rstate = rctx->vs_const;
1716 nconst = &rctx->vs_nconst;
1717 offset = R_030000_SQ_ALU_CONSTANT0_0 + 0x1000;
1718 break;
1719 case PIPE_SHADER_FRAGMENT:
1720 rstate = rctx->ps_const;
1721 nconst = &rctx->ps_nconst;
1722 offset = R_030000_SQ_ALU_CONSTANT0_0;
1723 break;
1724 default:
1725 R600_ERR("unsupported %d\n", shader);
1726 return;
1727 }
1728 if (buffer && buffer->width0 > 0) {
1729 *nconst = buffer->width0 / 16;
1730 ptr = pipe_buffer_map(ctx, buffer, PIPE_TRANSFER_READ, &transfer);
1731 if (ptr == NULL)
1732 return;
1733 for (int i = 0; i < *nconst; i++, offset += 0x10) {
1734 rstate[i].nregs = 0;
1735 r600_pipe_state_add_reg(&rstate[i], R600_GROUP_ALU_CONST, offset + 0x0, ptr[i * 4 + 0], 0xFFFFFFFF, NULL);
1736 r600_pipe_state_add_reg(&rstate[i], R600_GROUP_ALU_CONST, offset + 0x4, ptr[i * 4 + 1], 0xFFFFFFFF, NULL);
1737 r600_pipe_state_add_reg(&rstate[i], R600_GROUP_ALU_CONST, offset + 0x8, ptr[i * 4 + 2], 0xFFFFFFFF, NULL);
1738 r600_pipe_state_add_reg(&rstate[i], R600_GROUP_ALU_CONST, offset + 0xC, ptr[i * 4 + 3], 0xFFFFFFFF, NULL);
1739 r600_context_pipe_state_set(&rctx->ctx, &rstate[i]);
1740 }
1741 pipe_buffer_unmap(ctx, buffer, transfer);
1742 }
1743 }
1744
1745 static void *r600_create_shader_state(struct pipe_context *ctx,
1746 const struct pipe_shader_state *state)
1747 {
1748 struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
1749 int r;
1750
1751 r = r600_pipe_shader_create2(ctx, shader, state->tokens);
1752 if (r) {
1753 		FREE(shader);	/* avoid leaking the CALLOC'd shader when compilation fails */
		return NULL;
1754 }
1755 return shader;
1756 }
1757
1758 static void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
1759 {
1760 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1761
1762 /* TODO delete old shader */
1763 rctx->ps_shader = (struct r600_pipe_shader *)state;
1764 }
1765
1766 static void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
1767 {
1768 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1769
1770 /* TODO delete old shader */
1771 rctx->vs_shader = (struct r600_pipe_shader *)state;
1772 }
1773
1774 static void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
1775 {
1776 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1777 struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;
1778
1779 if (rctx->ps_shader == shader) {
1780 rctx->ps_shader = NULL;
1781 }
1782 /* TODO proper delete */
1783 free(shader);
1784 }
1785
1786 static void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
1787 {
1788 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
1789 struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;
1790
1791 if (rctx->vs_shader == shader) {
1792 rctx->vs_shader = NULL;
1793 }
1794 /* TODO proper delete */
1795 free(shader);
1796 }
1797
1798 static void r600_init_state_functions2(struct r600_pipe_context *rctx)
1799 {
1800 rctx->context.create_blend_state = r600_create_blend_state;
1801 rctx->context.create_depth_stencil_alpha_state = r600_create_dsa_state;
1802 rctx->context.create_fs_state = r600_create_shader_state;
1803 rctx->context.create_rasterizer_state = r600_create_rs_state;
1804 rctx->context.create_sampler_state = r600_create_sampler_state;
1805 rctx->context.create_sampler_view = r600_create_sampler_view;
1806 rctx->context.create_vertex_elements_state = r600_create_vertex_elements;
1807 rctx->context.create_vs_state = r600_create_shader_state;
1808 rctx->context.bind_blend_state = r600_bind_blend_state;
1809 rctx->context.bind_depth_stencil_alpha_state = r600_bind_state;
1810 rctx->context.bind_fragment_sampler_states = r600_bind_ps_sampler;
1811 rctx->context.bind_fs_state = r600_bind_ps_shader;
1812 rctx->context.bind_rasterizer_state = r600_bind_rs_state;
1813 rctx->context.bind_vertex_elements_state = r600_bind_vertex_elements;
1814 rctx->context.bind_vertex_sampler_states = r600_bind_vs_sampler;
1815 rctx->context.bind_vs_state = r600_bind_vs_shader;
1816 rctx->context.delete_blend_state = r600_delete_state;
1817 rctx->context.delete_depth_stencil_alpha_state = r600_delete_state;
1818 rctx->context.delete_fs_state = r600_delete_ps_shader;
1819 rctx->context.delete_rasterizer_state = r600_delete_rs_state;
1820 rctx->context.delete_sampler_state = r600_delete_state;
1821 rctx->context.delete_vertex_elements_state = r600_delete_vertex_element;
1822 rctx->context.delete_vs_state = r600_delete_vs_shader;
1823 rctx->context.set_blend_color = r600_set_blend_color;
1824 rctx->context.set_clip_state = r600_set_clip_state;
1825 rctx->context.set_constant_buffer = r600_set_constant_buffer;
1826 rctx->context.set_fragment_sampler_views = r600_set_ps_sampler_view;
1827 rctx->context.set_framebuffer_state = r600_set_framebuffer_state;
1828 rctx->context.set_polygon_stipple = r600_set_polygon_stipple;
1829 rctx->context.set_sample_mask = r600_set_sample_mask;
1830 rctx->context.set_scissor_state = r600_set_scissor_state;
1831 rctx->context.set_stencil_ref = r600_set_stencil_ref;
1832 rctx->context.set_vertex_buffers = r600_set_vertex_buffers;
1833 rctx->context.set_index_buffer = r600_set_index_buffer;
1834 rctx->context.set_vertex_sampler_views = r600_set_vs_sampler_view;
1835 rctx->context.set_viewport_state = r600_set_viewport_state;
1836 rctx->context.sampler_view_destroy = r600_sampler_view_destroy;
1837 }
1838
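/* One-time SQ/VGT configuration: pick per-family GPR, thread and stack
 * budgets, then emit the mostly static CONFIG and CONTEXT registers.  The
 * per-family numbers appear to follow the values used by the DDX/DRM. */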
1839 static void r600_init_config2(struct r600_pipe_context *rctx)
1840 {
1841 int ps_prio;
1842 int vs_prio;
1843 int gs_prio;
1844 int es_prio;
1845 int num_ps_gprs;
1846 int num_vs_gprs;
1847 int num_gs_gprs;
1848 int num_es_gprs;
1849 int num_temp_gprs;
1850 int num_ps_threads;
1851 int num_vs_threads;
1852 int num_gs_threads;
1853 int num_es_threads;
1854 int num_ps_stack_entries;
1855 int num_vs_stack_entries;
1856 int num_gs_stack_entries;
1857 int num_es_stack_entries;
1858 enum radeon_family family;
1859 struct r600_pipe_state *rstate = &rctx->config;
1860 u32 tmp;
1861
1862 family = r600_get_family(rctx->radeon);
1863 ps_prio = 0;
1864 vs_prio = 1;
1865 gs_prio = 2;
1866 es_prio = 3;
1867 switch (family) {
1868 case CHIP_R600:
1869 num_ps_gprs = 192;
1870 num_vs_gprs = 56;
1871 num_temp_gprs = 4;
1872 num_gs_gprs = 0;
1873 num_es_gprs = 0;
1874 num_ps_threads = 136;
1875 num_vs_threads = 48;
1876 num_gs_threads = 4;
1877 num_es_threads = 4;
1878 num_ps_stack_entries = 128;
1879 num_vs_stack_entries = 128;
1880 num_gs_stack_entries = 0;
1881 num_es_stack_entries = 0;
1882 break;
1883 case CHIP_RV630:
1884 case CHIP_RV635:
1885 num_ps_gprs = 84;
1886 num_vs_gprs = 36;
1887 num_temp_gprs = 4;
1888 num_gs_gprs = 0;
1889 num_es_gprs = 0;
1890 num_ps_threads = 144;
1891 num_vs_threads = 40;
1892 num_gs_threads = 4;
1893 num_es_threads = 4;
1894 num_ps_stack_entries = 40;
1895 num_vs_stack_entries = 40;
1896 num_gs_stack_entries = 32;
1897 num_es_stack_entries = 16;
1898 break;
1899 case CHIP_RV610:
1900 case CHIP_RV620:
1901 case CHIP_RS780:
1902 case CHIP_RS880:
1903 default:
1904 num_ps_gprs = 84;
1905 num_vs_gprs = 36;
1906 num_temp_gprs = 4;
1907 num_gs_gprs = 0;
1908 num_es_gprs = 0;
1909 num_ps_threads = 136;
1910 num_vs_threads = 48;
1911 num_gs_threads = 4;
1912 num_es_threads = 4;
1913 num_ps_stack_entries = 40;
1914 num_vs_stack_entries = 40;
1915 num_gs_stack_entries = 32;
1916 num_es_stack_entries = 16;
1917 break;
1918 case CHIP_RV670:
1919 num_ps_gprs = 144;
1920 num_vs_gprs = 40;
1921 num_temp_gprs = 4;
1922 num_gs_gprs = 0;
1923 num_es_gprs = 0;
1924 num_ps_threads = 136;
1925 num_vs_threads = 48;
1926 num_gs_threads = 4;
1927 num_es_threads = 4;
1928 num_ps_stack_entries = 40;
1929 num_vs_stack_entries = 40;
1930 num_gs_stack_entries = 32;
1931 num_es_stack_entries = 16;
1932 break;
1933 case CHIP_RV770:
1934 num_ps_gprs = 192;
1935 num_vs_gprs = 56;
1936 num_temp_gprs = 4;
1937 num_gs_gprs = 0;
1938 num_es_gprs = 0;
1939 num_ps_threads = 188;
1940 num_vs_threads = 60;
1941 num_gs_threads = 0;
1942 num_es_threads = 0;
1943 num_ps_stack_entries = 256;
1944 num_vs_stack_entries = 256;
1945 num_gs_stack_entries = 0;
1946 num_es_stack_entries = 0;
1947 break;
1948 case CHIP_RV730:
1949 case CHIP_RV740:
1950 num_ps_gprs = 84;
1951 num_vs_gprs = 36;
1952 num_temp_gprs = 4;
1953 num_gs_gprs = 0;
1954 num_es_gprs = 0;
1955 num_ps_threads = 188;
1956 num_vs_threads = 60;
1957 num_gs_threads = 0;
1958 num_es_threads = 0;
1959 num_ps_stack_entries = 128;
1960 num_vs_stack_entries = 128;
1961 num_gs_stack_entries = 0;
1962 num_es_stack_entries = 0;
1963 break;
1964 case CHIP_RV710:
1965 num_ps_gprs = 192;
1966 num_vs_gprs = 56;
1967 num_temp_gprs = 4;
1968 num_gs_gprs = 0;
1969 num_es_gprs = 0;
1970 num_ps_threads = 144;
1971 num_vs_threads = 48;
1972 num_gs_threads = 0;
1973 num_es_threads = 0;
1974 num_ps_stack_entries = 128;
1975 num_vs_stack_entries = 128;
1976 num_gs_stack_entries = 0;
1977 num_es_stack_entries = 0;
1978 break;
1979 }
1980
1981 rstate->id = R600_PIPE_STATE_CONFIG;
1982
1983 /* SQ_CONFIG */
1984 tmp = 0;
1985 switch (family) {
1986 case CHIP_RV610:
1987 case CHIP_RV620:
1988 case CHIP_RS780:
1989 case CHIP_RS880:
1990 case CHIP_RV710:
1991 break;
1992 default:
1993 tmp |= S_008C00_VC_ENABLE(1);
1994 break;
1995 }
1996 tmp |= S_008C00_DX9_CONSTS(1);
1997 tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1);
1998 tmp |= S_008C00_PS_PRIO(ps_prio);
1999 tmp |= S_008C00_VS_PRIO(vs_prio);
2000 tmp |= S_008C00_GS_PRIO(gs_prio);
2001 tmp |= S_008C00_ES_PRIO(es_prio);
2002 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C00_SQ_CONFIG, tmp, 0xFFFFFFFF, NULL);
2003
2004 /* SQ_GPR_RESOURCE_MGMT_1 */
2005 tmp = 0;
2006 tmp |= S_008C04_NUM_PS_GPRS(num_ps_gprs);
2007 tmp |= S_008C04_NUM_VS_GPRS(num_vs_gprs);
2008 tmp |= S_008C04_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs);
2009 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C04_SQ_GPR_RESOURCE_MGMT_1, tmp, 0xFFFFFFFF, NULL);
2010
2011 /* SQ_GPR_RESOURCE_MGMT_2 */
2012 tmp = 0;
2013 tmp |= S_008C08_NUM_GS_GPRS(num_gs_gprs);
2014 	tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs);
2015 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C08_SQ_GPR_RESOURCE_MGMT_2, tmp, 0xFFFFFFFF, NULL);
2016
2017 /* SQ_THREAD_RESOURCE_MGMT */
2018 tmp = 0;
2019 tmp |= S_008C0C_NUM_PS_THREADS(num_ps_threads);
2020 tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads);
2021 tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads);
2022 tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads);
2023 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C0C_SQ_THREAD_RESOURCE_MGMT, tmp, 0xFFFFFFFF, NULL);
2024
2025 /* SQ_STACK_RESOURCE_MGMT_1 */
2026 tmp = 0;
2027 tmp |= S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
2028 tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
2029 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C10_SQ_STACK_RESOURCE_MGMT_1, tmp, 0xFFFFFFFF, NULL);
2030
2031 /* SQ_STACK_RESOURCE_MGMT_2 */
2032 tmp = 0;
2033 tmp |= S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
2034 tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
2035 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008C14_SQ_STACK_RESOURCE_MGMT_2, tmp, 0xFFFFFFFF, NULL);
2036
2037 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009714_VC_ENHANCE, 0x00000000, 0xFFFFFFFF, NULL);
2038 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028350_SX_MISC, 0x00000000, 0xFFFFFFFF, NULL);
2039
2040 if (family >= CHIP_RV770) {
2041 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000, 0xFFFFFFFF, NULL);
2042 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009508_TA_CNTL_AUX, 0x07000002, 0xFFFFFFFF, NULL);
2043 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009830_DB_DEBUG, 0x00000000, 0xFFFFFFFF, NULL);
2044 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009838_DB_WATERMARKS, 0x00420204, 0xFFFFFFFF, NULL);
2045 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286C8_SPI_THREAD_GROUPING, 0x00000000, 0xFFFFFFFF, NULL);
2046 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A4C_PA_SC_MODE_CNTL, 0x00514000, 0xFFFFFFFF, NULL);
2047 } else {
2048 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00000000, 0xFFFFFFFF, NULL);
2049 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009508_TA_CNTL_AUX, 0x07000003, 0xFFFFFFFF, NULL);
2050 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009830_DB_DEBUG, 0x82000000, 0xFFFFFFFF, NULL);
2051 r600_pipe_state_add_reg(rstate, R600_GROUP_CONFIG, R_009838_DB_WATERMARKS, 0x01020204, 0xFFFFFFFF, NULL);
2052 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0286C8_SPI_THREAD_GROUPING, 0x00000001, 0xFFFFFFFF, NULL);
2053 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A4C_PA_SC_MODE_CNTL, 0x00004010, 0xFFFFFFFF, NULL);
2054 }
2055 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2056 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288AC_SQ_GSVS_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2057 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288B0_SQ_ESTMP_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2058 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288B4_SQ_GSTMP_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2059 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288B8_SQ_VSTMP_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2060 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288BC_SQ_PSTMP_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2061 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288C0_SQ_FBUF_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2062 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288C4_SQ_REDUC_RING_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2063 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_0288C8_SQ_GS_VERT_ITEMSIZE, 0x00000000, 0xFFFFFFFF, NULL);
2064 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A10_VGT_OUTPUT_PATH_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2065 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A14_VGT_HOS_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2066 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A18_VGT_HOS_MAX_TESS_LEVEL, 0x00000000, 0xFFFFFFFF, NULL);
2067 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, 0x00000000, 0xFFFFFFFF, NULL);
2068 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A20_VGT_HOS_REUSE_DEPTH, 0x00000000, 0xFFFFFFFF, NULL);
2069 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A24_VGT_GROUP_PRIM_TYPE, 0x00000000, 0xFFFFFFFF, NULL);
2070 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A28_VGT_GROUP_FIRST_DECR, 0x00000000, 0xFFFFFFFF, NULL);
2071 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A2C_VGT_GROUP_DECR, 0x00000000, 0xFFFFFFFF, NULL);
2072 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A30_VGT_GROUP_VECT_0_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2073 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A34_VGT_GROUP_VECT_1_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2074 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A38_VGT_GROUP_VECT_0_FMT_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2075 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL, 0x00000000, 0xFFFFFFFF, NULL);
2076 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A40_VGT_GS_MODE, 0x00000000, 0xFFFFFFFF, NULL);
2077 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028AB0_VGT_STRMOUT_EN, 0x00000000, 0xFFFFFFFF, NULL);
2078 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028AB4_VGT_REUSE_OFF, 0x00000001, 0xFFFFFFFF, NULL);
2079 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028AB8_VGT_VTX_CNT_EN, 0x00000000, 0xFFFFFFFF, NULL);
2080 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028B20_VGT_STRMOUT_BUFFER_EN, 0x00000000, 0xFFFFFFFF, NULL);
2081
2082 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028400_VGT_MAX_VTX_INDX, 0x00FFFFFF, 0xFFFFFFFF, NULL);
2083 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028404_VGT_MIN_VTX_INDX, 0x00000000, 0xFFFFFFFF, NULL);
2084 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0x00000000, 0xFFFFFFFF, NULL);
2085 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A84_VGT_PRIMITIVEID_EN, 0x00000000, 0xFFFFFFFF, NULL);
2086 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 0x00000000, 0xFFFFFFFF, NULL);
2087 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0x00000000, 0xFFFFFFFF, NULL);
2088 r600_pipe_state_add_reg(rstate, R600_GROUP_CONTEXT, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0x00000000, 0xFFFFFFFF, NULL);
2089 r600_context_pipe_state_set(&rctx->ctx, rstate);
2090 }
2091
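/* Create a pipe_context: wire up draw/flush plus the state, blit and
 * resource function tables, create the blitter, then emit the static
 * configuration state. */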
2092 static struct pipe_context *r600_create_context2(struct pipe_screen *screen, void *priv)
2093 {
2094 struct r600_pipe_context *rctx = CALLOC_STRUCT(r600_pipe_context);
2095 struct r600_screen* rscreen = (struct r600_screen *)screen;
2096
2097 if (rctx == NULL)
2098 return NULL;
2099 rctx->context.winsys = rscreen->screen.winsys;
2100 rctx->context.screen = screen;
2101 rctx->context.priv = priv;
2102 rctx->context.destroy = r600_destroy_context;
2103 rctx->context.draw_vbo = r600_draw_vbo2;
2104 rctx->context.flush = r600_flush2;
2105
2106 /* Easy accessing of screen/winsys. */
2107 rctx->screen = rscreen;
2108 rctx->radeon = rscreen->radeon;
2109
2110 r600_init_blit_functions2(rctx);
2111 r600_init_state_functions2(rctx);
2112 r600_init_context_resource_functions2(rctx);
2113
2114 rctx->blitter = util_blitter_create(&rctx->context);
2115 if (rctx->blitter == NULL) {
2116 FREE(rctx);
2117 return NULL;
2118 }
2119
2120 if (r600_context_init(&rctx->ctx, rctx->radeon)) {
2121 r600_destroy_context(&rctx->context);
2122 return NULL;
2123 }
2124
2125 r600_init_config2(rctx);
2126
2127 return &rctx->context;
2128 }
2129
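/* Shader caps: only vertex and fragment shaders are exposed; the limits
 * below are conservative placeholders (see the TODOs) rather than exact
 * hardware limits. */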
2130 static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
2131 {
2132 switch(shader)
2133 {
2134 case PIPE_SHADER_FRAGMENT:
2135 case PIPE_SHADER_VERTEX:
2136 break;
2137 case PIPE_SHADER_GEOMETRY:
2138 /* TODO: support and enable geometry programs */
2139 return 0;
2140 default:
2141 /* TODO: support tessellation on Evergreen */
2142 return 0;
2143 }
2144
2145 /* TODO: all these should be fixed, since r600 surely supports much more! */
2146 switch (param) {
2147 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
2148 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
2149 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
2150 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
2151 return 16384;
2152 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
2153 return 8; /* FIXME */
2154 	case PIPE_SHADER_CAP_MAX_INPUTS:
2155 		if (shader == PIPE_SHADER_FRAGMENT)
2156 			return 10;
2157 		else
2158 			return 16;
2159 	case PIPE_SHADER_CAP_MAX_TEMPS:
2160 		return 256; /* max native temporaries */
2161 	case PIPE_SHADER_CAP_MAX_ADDRS:
2162 		return 1; /* max native address registers; FIXME: isn't this equal to TEMPS? */
2163 	case PIPE_SHADER_CAP_MAX_CONSTS:
2164 		return 256; /* max native parameters */
2165 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
2166 return 1;
2167 case PIPE_SHADER_CAP_MAX_PREDS:
2168 return 0; /* FIXME */
2169 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
2170 /* TODO: support this! */
2171 return 0;
2172 default:
2173 return 0;
2174 }
2175 }
2176
2177 struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
2178 const struct pipe_resource *templ);
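/* Create an immutable PIPE_BUFFER from user memory by allocating a regular
 * buffer resource and copying the data in through a BO map.  Note that the
 * map return value is not checked before the memcpy. */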
2179 struct pipe_resource *r600_user_buffer_create2(struct pipe_screen *screen,
2180 void *ptr, unsigned bytes,
2181 unsigned bind)
2182 {
2183 struct pipe_resource *resource;
2184 struct r600_resource *rresource;
2185 struct pipe_resource desc;
2186 struct radeon *radeon = (struct radeon *)screen->winsys;
2187 void *rptr;
2188
2189 desc.screen = screen;
2190 desc.target = PIPE_BUFFER;
2191 desc.format = PIPE_FORMAT_R8_UNORM;
2192 desc.usage = PIPE_USAGE_IMMUTABLE;
2193 desc.bind = bind;
2194 desc.width0 = bytes;
2195 desc.height0 = 1;
2196 desc.depth0 = 1;
2197 desc.flags = 0;
2198 resource = r600_buffer_create(screen, &desc);
2199 if (resource == NULL) {
2200 return NULL;
2201 }
2202
2203 rresource = (struct r600_resource *)resource;
2204 rptr = radeon_ws_bo_map(radeon, rresource->bo, 0, NULL);
2205 memcpy(rptr, ptr, bytes);
2206 radeon_ws_bo_unmap(radeon, rresource->bo);
2207
2208 return resource;
2209 }
2210
2211 void r600_init_screen_texture_functions(struct pipe_screen *screen);
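/* Create the pipe_screen: hook up the name/param/format queries, the
 * texture and resource helpers, and the user-buffer path above. */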
2212 struct pipe_screen *r600_screen_create2(struct radeon *radeon)
2213 {
2214 struct r600_screen *rscreen;
2215
2216 rscreen = CALLOC_STRUCT(r600_screen);
2217 if (rscreen == NULL) {
2218 return NULL;
2219 }
2220
2221 rscreen->radeon = radeon;
2222 rscreen->screen.winsys = (struct pipe_winsys*)radeon;
2223 rscreen->screen.destroy = r600_destroy_screen;
2224 rscreen->screen.get_name = r600_get_name;
2225 rscreen->screen.get_vendor = r600_get_vendor;
2226 rscreen->screen.get_param = r600_get_param;
2227 rscreen->screen.get_shader_param = r600_get_shader_param;
2228 rscreen->screen.get_paramf = r600_get_paramf;
2229 rscreen->screen.is_format_supported = r600_is_format_supported;
2230 rscreen->screen.context_create = r600_create_context2;
2231 r600_init_screen_texture_functions(&rscreen->screen);
2232 r600_init_screen_resource_functions(&rscreen->screen);
2233 rscreen->screen.user_buffer_create = r600_user_buffer_create2;
2234
2235 return &rscreen->screen;
2236 }