Merge branch 'master' of ssh://git.freedesktop.org/git/mesa/mesa into pipe-video
[mesa.git] / src / mesa / drivers / dri / i965 / brw_gs_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33 #include "main/glheader.h"
34 #include "main/macros.h"
35 #include "main/enums.h"
36
37 #include "program/program.h"
38 #include "intel_batchbuffer.h"
39
40 #include "brw_defines.h"
41 #include "brw_context.h"
42 #include "brw_eu.h"
43 #include "brw_gs.h"
44
45 static void brw_gs_alloc_regs( struct brw_gs_compile *c,
46 GLuint nr_verts )
47 {
48 GLuint i = 0,j;
49
50 /* Register usage is static, precompute here:
51 */
52 c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;
53
54 /* Payload vertices plus space for more generated vertices:
55 */
56 for (j = 0; j < nr_verts; j++) {
57 c->reg.vertex[j] = brw_vec4_grf(i, 0);
58 i += c->nr_regs;
59 }
60
61 c->reg.temp = brw_vec8_grf(i, 0);
62
63 c->prog_data.urb_read_length = c->nr_regs;
64 c->prog_data.total_grf = i;
65 }
66
67
/**
 * Emit one vertex (VUE) as its own URB entry.
 *
 * \param c       GS compile state (EU assembler, register map, VUE size).
 * \param vert    first GRF of the vertex data to send.
 * \param last    true for the final vertex of the program; the URB write
 *                then ends the thread (EOT) instead of allocating a new
 *                URB handle.
 * \param header  value placed in DWord 2 of the message header
 *                (PrimType / PrimStart / PrimEnd bits).
 */
static void brw_gs_emit_vue(struct brw_gs_compile *c, 
			    struct brw_reg vert,
			    GLboolean last,
			    GLuint header)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &c->func.brw->intel;
   GLboolean allocate = !last;  /* every vertex but the last needs a fresh handle */
   struct brw_reg temp;

   if (intel->gen < 6)
      temp = c->reg.R0;
   else {
      /* Gen6: build the message header in a scratch reg so R0 (the live
       * thread payload) is left intact.
       */
      temp = c->reg.temp;
      brw_MOV(p, retype(temp, BRW_REGISTER_TYPE_UD),
	      retype(c->reg.R0, BRW_REGISTER_TYPE_UD));
   }

   /* Overwrite PrimType and PrimStart in the message header, for
    * each vertex in turn:
    */
   brw_MOV(p, get_element_ud(temp, 2), brw_imm_ud(header));

   /* Copy the vertex from vertn into m1..mN+1:
    */
   brw_copy8(p, brw_message_reg(1), vert, c->nr_regs);

   /* Send each vertex as a separate write to the urb.  This is
    * different to the concept in brw_sf_emit.c, where subsequent
    * writes are used to build up a single urb entry.  Each of these
    * writes instantiates a separate urb entry, and a new one must be
    * allocated each time.
    */
   brw_urb_WRITE(p, 
		 allocate ? temp : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
		 0,
		 temp,
		 allocate,
		 1,		/* used */
		 c->nr_regs + 1, /* msg length */
		 allocate ? 1 : 0, /* response length */
		 allocate ? 0 : 1, /* eot */
		 1,		/* writes_complete */
		 0,		/* urb offset */
		 BRW_URB_SWIZZLE_NONE);

   /* Gen6: the write returned a fresh URB handle in temp DWord 0; copy it
    * back into R0 so the next message allocates from it.
    */
   if (intel->gen >= 6 && allocate)
      brw_MOV(p, get_element_ud(c->reg.R0, 0), get_element_ud(temp, 0));
}
117
/**
 * Emit the FF_SYNC message that serializes GS threads and allocates the
 * initial URB handle.
 *
 * \param c         GS compile state.
 * \param num_prim  primitive count placed in DWord 1 of the header.
 *
 * Pre-gen6 the header is built directly in R0; on gen6 it is staged in
 * c->reg.temp and the returned URB handle (DWord 0) is copied back to R0
 * for the subsequent URB writes.
 */
static void brw_gs_ff_sync(struct brw_gs_compile *c, int num_prim)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &c->func.brw->intel;

   if (intel->gen < 6) {
      brw_MOV(p, get_element_ud(c->reg.R0, 1), brw_imm_ud(num_prim));
      brw_ff_sync(p, 
		  c->reg.R0,
		  0,
		  c->reg.R0,
		  1, /* allocate */
		  1, /* response length */
		  0 /* eot */);
   } else {
      /* Gen6: stage the header in temp so R0 stays untouched until the
       * new handle is known.
       */
      brw_MOV(p, retype(c->reg.temp, BRW_REGISTER_TYPE_UD),
	      retype(c->reg.R0, BRW_REGISTER_TYPE_UD));
      brw_MOV(p, get_element_ud(c->reg.temp, 1), brw_imm_ud(num_prim));
      brw_ff_sync(p, 
		  c->reg.temp,
		  0,
		  c->reg.temp,
		  1, /* allocate */
		  1, /* response length */
		  0 /* eot */);
      /* Copy the allocated URB handle back into R0 DWord 0. */
      brw_MOV(p, get_element_ud(c->reg.R0, 0),
	      get_element_ud(c->reg.temp, 0));
   }
}
147
148
149 void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
150 {
151 struct intel_context *intel = &c->func.brw->intel;
152
153 brw_gs_alloc_regs(c, 4);
154
155 /* Use polygons for correct edgeflag behaviour. Note that vertex 3
156 * is the PV for quads, but vertex 0 for polygons:
157 */
158 if (intel->needs_ff_sync)
159 brw_gs_ff_sync(c, 1);
160 if (key->pv_first) {
161 brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
162 brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_POLYGON << 2));
163 brw_gs_emit_vue(c, c->reg.vertex[2], 0, (_3DPRIM_POLYGON << 2));
164 brw_gs_emit_vue(c, c->reg.vertex[3], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
165 }
166 else {
167 brw_gs_emit_vue(c, c->reg.vertex[3], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
168 brw_gs_emit_vue(c, c->reg.vertex[0], 0, (_3DPRIM_POLYGON << 2));
169 brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_POLYGON << 2));
170 brw_gs_emit_vue(c, c->reg.vertex[2], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
171 }
172 }
173
174 void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
175 {
176 struct intel_context *intel = &c->func.brw->intel;
177
178 brw_gs_alloc_regs(c, 4);
179
180 if (intel->needs_ff_sync)
181 brw_gs_ff_sync(c, 1);
182 if (key->pv_first) {
183 brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
184 brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_POLYGON << 2));
185 brw_gs_emit_vue(c, c->reg.vertex[2], 0, (_3DPRIM_POLYGON << 2));
186 brw_gs_emit_vue(c, c->reg.vertex[3], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
187 }
188 else {
189 brw_gs_emit_vue(c, c->reg.vertex[2], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
190 brw_gs_emit_vue(c, c->reg.vertex[3], 0, (_3DPRIM_POLYGON << 2));
191 brw_gs_emit_vue(c, c->reg.vertex[0], 0, (_3DPRIM_POLYGON << 2));
192 brw_gs_emit_vue(c, c->reg.vertex[1], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
193 }
194 }
195
196 void brw_gs_tris( struct brw_gs_compile *c )
197 {
198 struct intel_context *intel = &c->func.brw->intel;
199
200 brw_gs_alloc_regs(c, 3);
201
202 if (intel->needs_ff_sync)
203 brw_gs_ff_sync(c, 1);
204 brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_TRILIST << 2) | R02_PRIM_START));
205 brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_TRILIST << 2));
206 brw_gs_emit_vue(c, c->reg.vertex[2], 1, ((_3DPRIM_TRILIST << 2) | R02_PRIM_END));
207 }
208
209 void brw_gs_lines( struct brw_gs_compile *c )
210 {
211 struct intel_context *intel = &c->func.brw->intel;
212
213 brw_gs_alloc_regs(c, 2);
214
215 if (intel->needs_ff_sync)
216 brw_gs_ff_sync(c, 1);
217 brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_START));
218 brw_gs_emit_vue(c, c->reg.vertex[1], 1, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_END));
219 }
220
221 void brw_gs_points( struct brw_gs_compile *c )
222 {
223 struct intel_context *intel = &c->func.brw->intel;
224
225 brw_gs_alloc_regs(c, 1);
226
227 if (intel->needs_ff_sync)
228 brw_gs_ff_sync(c, 1);
229 brw_gs_emit_vue(c, c->reg.vertex[0], 1, ((_3DPRIM_POINTLIST << 2) | R02_PRIM_START | R02_PRIM_END));
230 }
231
232
233
234
235
236
237
238