/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include "anv_private.h"
31 #include "vk_format_info.h"
33 #include "genxml/gen_macros.h"
34 #include "genxml/genX_pack.h"
36 #if GEN_GEN == 7 && !GEN_IS_HASWELL
/* Saturating clamp of x to the inclusive range [min, max].
 *
 * The scissor math below is done in 64-bit precision specifically so
 * that out-of-range values saturate here instead of wrapping.
 */
static int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   return x < max ? x : max;
}
49 gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer
*cmd_buffer
)
51 uint32_t count
= cmd_buffer
->state
.gfx
.dynamic
.scissor
.count
;
52 const VkRect2D
*scissors
= cmd_buffer
->state
.gfx
.dynamic
.scissor
.scissors
;
53 struct anv_state scissor_state
=
54 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, count
* 8, 32);
56 for (uint32_t i
= 0; i
< count
; i
++) {
57 const VkRect2D
*s
= &scissors
[i
];
59 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
60 * ymax < ymin for empty clips. In case clip x, y, width height are all
61 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
62 * what we want. Just special case empty clips and produce a canonical
64 static const struct GEN7_SCISSOR_RECT empty_scissor
= {
65 .ScissorRectangleYMin
= 1,
66 .ScissorRectangleXMin
= 1,
67 .ScissorRectangleYMax
= 0,
68 .ScissorRectangleXMax
= 0
71 const int max
= 0xffff;
72 struct GEN7_SCISSOR_RECT scissor
= {
73 /* Do this math using int64_t so overflow gets clamped correctly. */
74 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
75 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
76 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
77 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
80 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
81 GEN7_SCISSOR_RECT_pack(NULL
, scissor_state
.map
+ i
* 8,
84 GEN7_SCISSOR_RECT_pack(NULL
, scissor_state
.map
+ i
* 8, &scissor
);
88 anv_batch_emit(&cmd_buffer
->batch
,
89 GEN7_3DSTATE_SCISSOR_STATE_POINTERS
, ssp
) {
90 ssp
.ScissorRectPointer
= scissor_state
.offset
;
93 anv_state_flush(cmd_buffer
->device
, scissor_state
);
97 static const uint32_t vk_to_gen_index_type
[] = {
98 [VK_INDEX_TYPE_UINT16
] = INDEX_WORD
,
99 [VK_INDEX_TYPE_UINT32
] = INDEX_DWORD
,
102 static const uint32_t restart_index_for_type
[] = {
103 [VK_INDEX_TYPE_UINT16
] = UINT16_MAX
,
104 [VK_INDEX_TYPE_UINT32
] = UINT32_MAX
,
107 void genX(CmdBindIndexBuffer
)(
108 VkCommandBuffer commandBuffer
,
111 VkIndexType indexType
)
113 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
114 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
116 cmd_buffer
->state
.gfx
.dirty
|= ANV_CMD_DIRTY_INDEX_BUFFER
;
118 cmd_buffer
->state
.restart_index
= restart_index_for_type
[indexType
];
119 cmd_buffer
->state
.gfx
.gen7
.index_buffer
= buffer
;
120 cmd_buffer
->state
.gfx
.gen7
.index_type
= vk_to_gen_index_type
[indexType
];
121 cmd_buffer
->state
.gfx
.gen7
.index_offset
= offset
;
125 get_depth_format(struct anv_cmd_buffer
*cmd_buffer
)
127 const struct anv_render_pass
*pass
= cmd_buffer
->state
.pass
;
128 const struct anv_subpass
*subpass
= cmd_buffer
->state
.subpass
;
130 if (subpass
->depth_stencil_attachment
.attachment
>= pass
->attachment_count
)
133 struct anv_render_pass_attachment
*att
=
134 &pass
->attachments
[subpass
->depth_stencil_attachment
.attachment
];
136 switch (att
->format
) {
137 case VK_FORMAT_D16_UNORM
:
138 case VK_FORMAT_D16_UNORM_S8_UINT
:
141 case VK_FORMAT_X8_D24_UNORM_PACK32
:
142 case VK_FORMAT_D24_UNORM_S8_UINT
:
143 return D24_UNORM_X8_UINT
;
145 case VK_FORMAT_D32_SFLOAT
:
146 case VK_FORMAT_D32_SFLOAT_S8_UINT
:
155 genX(cmd_buffer_flush_dynamic_state
)(struct anv_cmd_buffer
*cmd_buffer
)
157 struct anv_pipeline
*pipeline
= cmd_buffer
->state
.gfx
.base
.pipeline
;
158 struct anv_dynamic_state
*d
= &cmd_buffer
->state
.gfx
.dynamic
;
160 if (cmd_buffer
->state
.gfx
.dirty
& (ANV_CMD_DIRTY_PIPELINE
|
161 ANV_CMD_DIRTY_RENDER_TARGETS
|
162 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH
|
163 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS
)) {
164 uint32_t sf_dw
[GENX(3DSTATE_SF_length
)];
165 struct GENX(3DSTATE_SF
) sf
= {
166 GENX(3DSTATE_SF_header
),
167 .DepthBufferSurfaceFormat
= get_depth_format(cmd_buffer
),
168 .LineWidth
= d
->line_width
,
169 .GlobalDepthOffsetConstant
= d
->depth_bias
.bias
,
170 .GlobalDepthOffsetScale
= d
->depth_bias
.slope
,
171 .GlobalDepthOffsetClamp
= d
->depth_bias
.clamp
173 GENX(3DSTATE_SF_pack
)(NULL
, sf_dw
, &sf
);
175 anv_batch_emit_merge(&cmd_buffer
->batch
, sf_dw
, pipeline
->gen7
.sf
);
178 if (cmd_buffer
->state
.gfx
.dirty
& (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS
|
179 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE
)) {
180 struct anv_state cc_state
=
181 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
,
182 GENX(COLOR_CALC_STATE_length
) * 4,
184 struct GENX(COLOR_CALC_STATE
) cc
= {
185 .BlendConstantColorRed
= d
->blend_constants
[0],
186 .BlendConstantColorGreen
= d
->blend_constants
[1],
187 .BlendConstantColorBlue
= d
->blend_constants
[2],
188 .BlendConstantColorAlpha
= d
->blend_constants
[3],
189 .StencilReferenceValue
= d
->stencil_reference
.front
& 0xff,
190 .BackfaceStencilReferenceValue
= d
->stencil_reference
.back
& 0xff,
192 GENX(COLOR_CALC_STATE_pack
)(NULL
, cc_state
.map
, &cc
);
193 anv_state_flush(cmd_buffer
->device
, cc_state
);
195 anv_batch_emit(&cmd_buffer
->batch
, GENX(3DSTATE_CC_STATE_POINTERS
), ccp
) {
196 ccp
.ColorCalcStatePointer
= cc_state
.offset
;
200 if (cmd_buffer
->state
.gfx
.dirty
& (ANV_CMD_DIRTY_PIPELINE
|
201 ANV_CMD_DIRTY_RENDER_TARGETS
|
202 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK
|
203 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK
)) {
204 uint32_t depth_stencil_dw
[GENX(DEPTH_STENCIL_STATE_length
)];
206 struct GENX(DEPTH_STENCIL_STATE
) depth_stencil
= {
207 .StencilTestMask
= d
->stencil_compare_mask
.front
& 0xff,
208 .StencilWriteMask
= d
->stencil_write_mask
.front
& 0xff,
210 .BackfaceStencilTestMask
= d
->stencil_compare_mask
.back
& 0xff,
211 .BackfaceStencilWriteMask
= d
->stencil_write_mask
.back
& 0xff,
213 .StencilBufferWriteEnable
=
214 (d
->stencil_write_mask
.front
|| d
->stencil_write_mask
.back
) &&
215 pipeline
->writes_stencil
,
217 GENX(DEPTH_STENCIL_STATE_pack
)(NULL
, depth_stencil_dw
, &depth_stencil
);
219 struct anv_state ds_state
=
220 anv_cmd_buffer_merge_dynamic(cmd_buffer
, depth_stencil_dw
,
221 pipeline
->gen7
.depth_stencil_state
,
222 GENX(DEPTH_STENCIL_STATE_length
), 64);
224 anv_batch_emit(&cmd_buffer
->batch
,
225 GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS
), dsp
) {
226 dsp
.PointertoDEPTH_STENCIL_STATE
= ds_state
.offset
;
230 if (cmd_buffer
->state
.gfx
.gen7
.index_buffer
&&
231 cmd_buffer
->state
.gfx
.dirty
& (ANV_CMD_DIRTY_PIPELINE
|
232 ANV_CMD_DIRTY_INDEX_BUFFER
)) {
233 struct anv_buffer
*buffer
= cmd_buffer
->state
.gfx
.gen7
.index_buffer
;
234 uint32_t offset
= cmd_buffer
->state
.gfx
.gen7
.index_offset
;
237 anv_batch_emit(&cmd_buffer
->batch
, GEN75_3DSTATE_VF
, vf
) {
238 vf
.IndexedDrawCutIndexEnable
= pipeline
->primitive_restart
;
239 vf
.CutIndex
= cmd_buffer
->state
.restart_index
;
243 anv_batch_emit(&cmd_buffer
->batch
, GENX(3DSTATE_INDEX_BUFFER
), ib
) {
245 ib
.CutIndexEnable
= pipeline
->primitive_restart
;
247 ib
.IndexFormat
= cmd_buffer
->state
.gfx
.gen7
.index_type
;
248 ib
.MemoryObjectControlState
= GENX(MOCS
);
250 ib
.BufferStartingAddress
= anv_address_add(buffer
->address
,
252 ib
.BufferEndingAddress
= anv_address_add(buffer
->address
,
257 cmd_buffer
->state
.gfx
.dirty
= 0;
261 genX(cmd_buffer_enable_pma_fix
)(struct anv_cmd_buffer
*cmd_buffer
,
264 /* The NP PMA fix doesn't exist on gen7 */
267 void genX(CmdSetEvent
)(
268 VkCommandBuffer commandBuffer
,
270 VkPipelineStageFlags stageMask
)
272 anv_finishme("Implement events on gen7");
275 void genX(CmdResetEvent
)(
276 VkCommandBuffer commandBuffer
,
278 VkPipelineStageFlags stageMask
)
280 anv_finishme("Implement events on gen7");
283 void genX(CmdWaitEvents
)(
284 VkCommandBuffer commandBuffer
,
286 const VkEvent
* pEvents
,
287 VkPipelineStageFlags srcStageMask
,
288 VkPipelineStageFlags destStageMask
,
289 uint32_t memoryBarrierCount
,
290 const VkMemoryBarrier
* pMemoryBarriers
,
291 uint32_t bufferMemoryBarrierCount
,
292 const VkBufferMemoryBarrier
* pBufferMemoryBarriers
,
293 uint32_t imageMemoryBarrierCount
,
294 const VkImageMemoryBarrier
* pImageMemoryBarriers
)
296 anv_finishme("Implement events on gen7");
298 genX(CmdPipelineBarrier
)(commandBuffer
, srcStageMask
, destStageMask
,
299 false, /* byRegion */
300 memoryBarrierCount
, pMemoryBarriers
,
301 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
302 imageMemoryBarrierCount
, pImageMemoryBarriers
);