Merge commit 'origin/gallium-master-merge'
[mesa.git] / src / gallium / drivers / i965simple / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "brw_batch.h"
33 #include "brw_context.h"
34 #include "brw_state.h"
35 #include "brw_defines.h"
36
37
38
39
40
41 /***********************************************************************
42 * Blend color
43 */
44
45 static void upload_blend_constant_color(struct brw_context *brw)
46 {
47 struct brw_blend_constant_color bcc;
48
49 memset(&bcc, 0, sizeof(bcc));
50 bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
51 bcc.header.length = sizeof(bcc)/4-2;
52 bcc.blend_constant_color[0] = brw->attribs.BlendColor.color[0];
53 bcc.blend_constant_color[1] = brw->attribs.BlendColor.color[1];
54 bcc.blend_constant_color[2] = brw->attribs.BlendColor.color[2];
55 bcc.blend_constant_color[3] = brw->attribs.BlendColor.color[3];
56
57 BRW_CACHED_BATCH_STRUCT(brw, &bcc);
58 }
59
60
/* Re-emit the blend constant color whenever blend state is flagged
 * dirty (BRW_NEW_BLEND); no derived-state cache dependencies.
 */
const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .brw = BRW_NEW_BLEND,
      .cache = 0
   },
   .update = upload_blend_constant_color
};
68
69
70 /***********************************************************************
71 * Drawing rectangle
72 */
73 static void upload_drawing_rect(struct brw_context *brw)
74 {
75 struct brw_drawrect bdr;
76
77 memset(&bdr, 0, sizeof(bdr));
78 bdr.header.opcode = CMD_DRAW_RECT;
79 bdr.header.length = sizeof(bdr)/4 - 2;
80 bdr.xmin = 0;
81 bdr.ymin = 0;
82 bdr.xmax = brw->attribs.FrameBuffer.cbufs[0]->width;
83 bdr.ymax = brw->attribs.FrameBuffer.cbufs[0]->height;
84 bdr.xorg = 0;
85 bdr.yorg = 0;
86
87 /* Can't use BRW_CACHED_BATCH_STRUCT because this is also emitted
88 * uncached in brw_draw.c:
89 */
90 BRW_BATCH_STRUCT(brw, &bdr);
91 }
92
/* Drawing rectangle depends on framebuffer size, so re-emit on scene
 * (framebuffer) changes.
 */
const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .brw = BRW_NEW_SCENE,
      .cache = 0
   },
   .update = upload_drawing_rect
};
100
101 /**
102 * Upload the binding table pointers, which point each stage's array of surface
103 * state pointers.
104 *
105 * The binding table pointers are relative to the surface state base address,
106 * which is the BRW_SS_POOL cache buffer.
107 */
108 static void upload_binding_table_pointers(struct brw_context *brw)
109 {
110 struct brw_binding_table_pointers btp;
111 memset(&btp, 0, sizeof(btp));
112
113 btp.header.opcode = CMD_BINDING_TABLE_PTRS;
114 btp.header.length = sizeof(btp)/4 - 2;
115 btp.vs = 0;
116 btp.gs = 0;
117 btp.clp = 0;
118 btp.sf = 0;
119 btp.wm = brw->wm.bind_ss_offset;
120
121 BRW_CACHED_BATCH_STRUCT(brw, &btp);
122 }
123
/* Re-emit the binding table pointers whenever the surface binding
 * table is rebuilt in the state cache.
 */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .brw = 0,
      .cache = CACHE_NEW_SURF_BIND
   },
   .update = upload_binding_table_pointers,
};
131
132
133 /**
134 * Upload pointers to the per-stage state.
135 *
136 * The state pointers in this packet are all relative to the general state
137 * base address set by CMD_STATE_BASE_ADDRESS, which is the BRW_GS_POOL buffer.
138 */
139 static void upload_pipelined_state_pointers(struct brw_context *brw )
140 {
141 struct brw_pipelined_state_pointers psp;
142 memset(&psp, 0, sizeof(psp));
143
144 psp.header.opcode = CMD_PIPELINED_STATE_POINTERS;
145 psp.header.length = sizeof(psp)/4 - 2;
146
147 psp.vs.offset = brw->vs.state_gs_offset >> 5;
148 psp.sf.offset = brw->sf.state_gs_offset >> 5;
149 psp.wm.offset = brw->wm.state_gs_offset >> 5;
150 psp.cc.offset = brw->cc.state_gs_offset >> 5;
151
152 /* GS gets turned on and off regularly. Need to re-emit URB fence
153 * after this occurs.
154 */
155 if (brw->gs.prog_active) {
156 psp.gs.offset = brw->gs.state_gs_offset >> 5;
157 psp.gs.enable = 1;
158 }
159
160 if (0) {
161 psp.clp.offset = brw->clip.state_gs_offset >> 5;
162 psp.clp.enable = 1;
163 }
164
165
166 if (BRW_CACHED_BATCH_STRUCT(brw, &psp))
167 brw->state.dirty.brw |= BRW_NEW_PSP;
168 }
169
/* Pointers must be refreshed whenever any of the per-stage unit state
 * objects (or the GS program) moves in the state cache.
 */
const struct brw_tracked_state brw_pipelined_state_pointers = {
   .dirty = {
      .brw = 0,
      .cache = (CACHE_NEW_VS_UNIT |
		CACHE_NEW_GS_UNIT |
		CACHE_NEW_GS_PROG |
		CACHE_NEW_CLIP_UNIT |
		CACHE_NEW_SF_UNIT |
		CACHE_NEW_WM_UNIT |
		CACHE_NEW_CC_UNIT)
   },
   .update = upload_pipelined_state_pointers
};
183
/* Emit pipelined state pointers, then the URB fence, then constant
 * buffer state.  The call order matters: the URB fence must follow a
 * PSP emission (see the GS note in upload_pipelined_state_pointers).
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}
190
191
/* Combined PSP + URB fence + CB state: triggered by an explicit URB
 * fence flag or by any unit-state move in the cache.
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .brw = BRW_NEW_URB_FENCE,
      .cache = (CACHE_NEW_VS_UNIT |
		CACHE_NEW_GS_UNIT |
		CACHE_NEW_GS_PROG |
		CACHE_NEW_CLIP_UNIT |
		CACHE_NEW_SF_UNIT |
		CACHE_NEW_WM_UNIT |
		CACHE_NEW_CC_UNIT)
   },
   .update = upload_psp_urb_cbs
};
205
206 /**
207 * Upload the depthbuffer offset and format.
208 *
209 * We have to do this per state validation as we need to emit the relocation
210 * in the batch buffer.
211 */
/* Emit the 5-dword depth buffer packet for the bound zsbuf, or a null
 * depth surface when none is bound.  Emitted directly (not cached)
 * because the buffer relocation must land in this batch.
 */
static void upload_depthbuffer(struct brw_context *brw)
{
   struct pipe_surface *depth_surface = brw->attribs.FrameBuffer.zsbuf;

   BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
   if (depth_surface == NULL) {
      /* No depth buffer: emit a null surface so depth access is off. */
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
		(BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
   } else {
      unsigned int format;
      struct brw_texture *tex = (struct brw_texture *)depth_surface->texture;
      /* Only non-compressed (1x1 block) formats are expected here. */
      assert(depth_surface->block.width == 1);
      assert(depth_surface->block.height == 1);
      /* Pick the hardware depth format from bytes-per-pixel, with the
       * Z32F / Z24S8 distinction made on the pipe format.
       */
      switch (depth_surface->block.size) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (depth_surface->format == PIPE_FORMAT_Z32_FLOAT)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      /* Dword 1: pitch-1 in bytes, format, tile walk, surface type.
       * NOTE(review): tiling bit is commented out -- surfaces are
       * presumably untiled here; confirm before re-enabling.
       */
      OUT_BATCH((depth_surface->stride - 1) |
		(format << 18) |
		(BRW_TILEWALK_YMAJOR << 26) |
//		(depth_surface->region->tiled << 27) |
		(BRW_SURFACE_2D << 29));
      /* Dword 2: relocated pointer to the depth buffer itself. */
      OUT_RELOC(tex->buffer,
		PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE, 0);
      /* Dword 3: layout, width-1 (in pixels: stride/bpp), height-1. */
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
		((depth_surface->stride/depth_surface->block.size - 1) << 6) |
		((depth_surface->height - 1) << 19));
      OUT_BATCH(0);
   }
   ADVANCE_BATCH();
}
258
/* Depth buffer packet is re-emitted on scene (framebuffer) changes. */
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .brw = BRW_NEW_SCENE,
      .cache = 0
   },
   .update = upload_depthbuffer,
};
266
267
268
269
270 /***********************************************************************
271 * Polygon stipple packet
272 */
273
274 static void upload_polygon_stipple(struct brw_context *brw)
275 {
276 struct brw_polygon_stipple bps;
277 unsigned i;
278
279 memset(&bps, 0, sizeof(bps));
280 bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
281 bps.header.length = sizeof(bps)/4-2;
282
283 /* XXX: state tracker should send *all* state down initially!
284 */
285 if (brw->attribs.PolygonStipple)
286 for (i = 0; i < 32; i++)
287 bps.stipple[i] = brw->attribs.PolygonStipple->stipple[31 - i]; /* invert */
288
289 BRW_CACHED_BATCH_STRUCT(brw, &bps);
290 }
291
/* Polygon stipple pattern: re-emitted when stipple state changes. */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .brw = BRW_NEW_STIPPLE,
      .cache = 0
   },
   .update = upload_polygon_stipple
};
299
300
301 /***********************************************************************
302 * Line stipple packet
303 */
304
305 static void upload_line_stipple(struct brw_context *brw)
306 {
307 struct brw_line_stipple bls;
308 float tmp;
309 int tmpi;
310
311 memset(&bls, 0, sizeof(bls));
312 bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
313 bls.header.length = sizeof(bls)/4 - 2;
314
315 bls.bits0.pattern = brw->attribs.Raster->line_stipple_pattern;
316 bls.bits1.repeat_count = brw->attribs.Raster->line_stipple_factor;
317
318 tmp = 1.0 / (float) brw->attribs.Raster->line_stipple_factor;
319 tmpi = tmp * (1<<13);
320
321
322 bls.bits1.inverse_repeat_count = tmpi;
323
324 BRW_CACHED_BATCH_STRUCT(brw, &bls);
325 }
326
/* Line stipple pattern: re-emitted when stipple state changes. */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .brw = BRW_NEW_STIPPLE,
      .cache = 0
   },
   .update = upload_line_stipple
};
334
335
336 /***********************************************************************
337 * Misc constant state packets
338 */
339
/* PIPE_CONTROL emission.
 *
 * NOTE(review): the bare `return` below deliberately disables this
 * packet -- everything after it is dead code kept for reference.
 * Presumably PIPE_CONTROL was found unnecessary or harmful here;
 * confirm before re-enabling.
 */
static void upload_pipe_control(struct brw_context *brw)
{
   struct brw_pipe_control pc;

   /* Intentionally disabled: nothing is emitted. */
   return;

   memset(&pc, 0, sizeof(pc));

   pc.header.opcode = CMD_PIPE_CONTROL;
   pc.header.length = sizeof(pc)/4 - 2;
   pc.header.post_sync_operation = PIPE_CONTROL_NOWRITE;

   pc.header.instruction_state_cache_flush_enable = 1;

   pc.bits1.dest_addr_type = PIPE_CONTROL_GTTWRITE_GLOBAL;

   BRW_BATCH_STRUCT(brw, &pc);
}
358
/* Tracked-state wrapper for the (currently disabled) PIPE_CONTROL. */
const struct brw_tracked_state brw_pipe_control = {
   .dirty = {
      .brw = BRW_NEW_SCENE,
      .cache = 0
   },
   .update = upload_pipe_control
};
366
367
368 /***********************************************************************
369 * Misc invarient state packets
370 */
371
372 static void upload_invarient_state( struct brw_context *brw )
373 {
374 {
375 struct brw_mi_flush flush;
376
377 memset(&flush, 0, sizeof(flush));
378 flush.opcode = CMD_MI_FLUSH;
379 flush.flags = BRW_FLUSH_STATE_CACHE | BRW_FLUSH_READ_CACHE;
380 BRW_BATCH_STRUCT(brw, &flush);
381 }
382
383 {
384 /* 0x61040000 Pipeline Select */
385 /* PipelineSelect : 0 */
386 struct brw_pipeline_select ps;
387
388 memset(&ps, 0, sizeof(ps));
389 ps.header.opcode = CMD_PIPELINE_SELECT;
390 ps.header.pipeline_select = 0;
391 BRW_BATCH_STRUCT(brw, &ps);
392 }
393
394 {
395 struct brw_global_depth_offset_clamp gdo;
396 memset(&gdo, 0, sizeof(gdo));
397
398 /* Disable depth offset clamping.
399 */
400 gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
401 gdo.header.length = sizeof(gdo)/4 - 2;
402 gdo.depth_offset_clamp = 0.0;
403
404 BRW_BATCH_STRUCT(brw, &gdo);
405 }
406
407
408 /* 0x61020000 State Instruction Pointer */
409 {
410 struct brw_system_instruction_pointer sip;
411 memset(&sip, 0, sizeof(sip));
412
413 sip.header.opcode = CMD_STATE_INSN_POINTER;
414 sip.header.length = 0;
415 sip.bits0.pad = 0;
416 sip.bits0.system_instruction_pointer = 0;
417 BRW_BATCH_STRUCT(brw, &sip);
418 }
419
420
421 {
422 struct brw_vf_statistics vfs;
423 memset(&vfs, 0, sizeof(vfs));
424
425 vfs.opcode = CMD_VF_STATISTICS;
426 if (BRW_DEBUG & DEBUG_STATS)
427 vfs.statistics_enable = 1;
428
429 BRW_BATCH_STRUCT(brw, &vfs);
430 }
431
432
433 {
434 struct brw_polygon_stipple_offset bpso;
435
436 memset(&bpso, 0, sizeof(bpso));
437 bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
438 bpso.header.length = sizeof(bpso)/4-2;
439 bpso.bits0.x_offset = 0;
440 bpso.bits0.y_offset = 0;
441
442 BRW_BATCH_STRUCT(brw, &bpso);
443 }
444 }
445
/* Invariant packets are re-emitted once per scene. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .brw = BRW_NEW_SCENE,
      .cache = 0
   },
   .update = upload_invarient_state
};
453
454 /**
455 * Define the base addresses which some state is referenced from.
456 *
457 * This allows us to avoid having to emit relocations in many places for
458 * cached state, and instead emit pointers inside of large, mostly-static
459 * state pools. This comes at the expense of memory, and more expensive cache
460 * misses.
461 */
static void upload_state_base_address( struct brw_context *brw )
{
   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
   BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   /* NOTE(review): the relocation delta / bare value of 1 presumably
    * sets the address-modify-enable bit in each dword -- confirm
    * against the packet layout before changing.
    */
   OUT_RELOC(brw->pool[BRW_GS_POOL].buffer,
	     PIPE_BUFFER_USAGE_GPU_READ,
	     1); /* General state base address */
   OUT_RELOC(brw->pool[BRW_SS_POOL].buffer,
	     PIPE_BUFFER_USAGE_GPU_READ,
	     1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}
480
481
/* Base addresses are re-emitted once per scene, since the state pools
 * they point at must be relocated into every batch.
 */
const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .brw = BRW_NEW_SCENE,
      .cache = 0
   },
   .update = upload_state_base_address
};