/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 *
 * The main driver hooks for drawing and launching compute shaders.
 */
#include <string.h>

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "intel/compiler/brw_compiler.h"
#include "iris_context.h"
#include "iris_defines.h"
43 * Record the current primitive mode and restart information, flagging
44 * related packets as dirty if necessary.
47 iris_update_draw_info(struct iris_context
*ice
,
48 const struct pipe_draw_info
*info
)
50 if (ice
->state
.prim_mode
!= info
->mode
) {
51 ice
->state
.prim_mode
= info
->mode
;
52 ice
->state
.dirty
|= IRIS_DIRTY_VF_TOPOLOGY
;
55 if (info
->mode
== PIPE_PRIM_PATCHES
&&
56 ice
->state
.vertices_per_patch
!= info
->vertices_per_patch
) {
57 ice
->state
.vertices_per_patch
= info
->vertices_per_patch
;
58 ice
->state
.dirty
|= IRIS_DIRTY_VF_TOPOLOGY
;
60 /* Flag constants dirty for gl_PatchVerticesIn if needed. */
61 const struct shader_info
*tcs_info
=
62 iris_get_shader_info(ice
, MESA_SHADER_TESS_CTRL
);
64 tcs_info
->system_values_read
& (1ull << SYSTEM_VALUE_VERTICES_IN
)) {
65 ice
->state
.dirty
|= IRIS_DIRTY_CONSTANTS_TCS
;
66 ice
->state
.shaders
[MESA_SHADER_TESS_CTRL
].cbuf0_needs_upload
= true;
70 if (ice
->state
.primitive_restart
!= info
->primitive_restart
||
71 ice
->state
.cut_index
!= info
->restart_index
) {
72 ice
->state
.dirty
|= IRIS_DIRTY_VF
;
73 ice
->state
.primitive_restart
= info
->primitive_restart
;
74 ice
->state
.cut_index
= info
->restart_index
;
79 * The pipe->draw_vbo() driver hook. Performs a draw on the GPU.
82 iris_draw_vbo(struct pipe_context
*ctx
, const struct pipe_draw_info
*info
)
84 struct iris_context
*ice
= (struct iris_context
*) ctx
;
85 struct iris_batch
*batch
= &ice
->batches
[IRIS_BATCH_RENDER
];
87 if (ice
->state
.predicate
== IRIS_PREDICATE_STATE_DONT_RENDER
)
90 /* We can't safely re-emit 3DSTATE_SO_BUFFERS because it may zero the
91 * write offsets, changing the behavior.
93 if (unlikely(INTEL_DEBUG
& DEBUG_REEMIT
))
94 ice
->state
.dirty
|= IRIS_ALL_DIRTY_FOR_RENDER
& ~IRIS_DIRTY_SO_BUFFERS
;
96 iris_batch_maybe_flush(batch
, 1500);
98 iris_update_draw_info(ice
, info
);
100 iris_update_compiled_shaders(ice
);
102 bool draw_aux_buffer_disabled
[BRW_MAX_DRAW_BUFFERS
] = { };
103 for (gl_shader_stage stage
= 0; stage
< MESA_SHADER_COMPUTE
; stage
++) {
104 if (ice
->shaders
.prog
[stage
])
105 iris_predraw_resolve_inputs(ice
,batch
, &ice
->state
.shaders
[stage
],
106 draw_aux_buffer_disabled
, true);
108 iris_predraw_resolve_framebuffer(ice
, batch
, draw_aux_buffer_disabled
);
110 iris_binder_reserve_3d(ice
);
112 ice
->vtbl
.update_surface_base_address(batch
, &ice
->state
.binder
);
113 ice
->vtbl
.upload_render_state(ice
, batch
, info
);
115 ice
->state
.dirty
&= ~IRIS_ALL_DIRTY_FOR_RENDER
;
117 iris_postdraw_update_resolve_tracking(ice
, batch
);
121 iris_update_grid_size_resource(struct iris_context
*ice
,
122 const struct pipe_grid_info
*grid
)
124 const struct iris_screen
*screen
= (void *) ice
->ctx
.screen
;
125 const struct isl_device
*isl_dev
= &screen
->isl_dev
;
126 struct iris_state_ref
*grid_ref
= &ice
->state
.grid_size
;
127 struct iris_state_ref
*state_ref
= &ice
->state
.grid_surf_state
;
129 // XXX: if the shader doesn't actually care about the grid info,
130 // don't bother uploading the surface?
132 if (grid
->indirect
) {
133 pipe_resource_reference(&grid_ref
->res
, grid
->indirect
);
134 grid_ref
->offset
= grid
->indirect_offset
;
136 /* Zero out the grid size so that the next non-indirect grid launch will
137 * re-upload it properly.
139 memset(ice
->state
.last_grid
, 0, sizeof(ice
->state
.last_grid
));
141 /* If the size is the same, we don't need to upload anything. */
142 if (memcmp(ice
->state
.last_grid
, grid
->grid
, sizeof(grid
->grid
)) == 0)
145 memcpy(ice
->state
.last_grid
, grid
->grid
, sizeof(grid
->grid
));
147 u_upload_data(ice
->state
.dynamic_uploader
, 0, sizeof(grid
->grid
), 4,
148 grid
->grid
, &grid_ref
->offset
, &grid_ref
->res
);
151 void *surf_map
= NULL
;
152 u_upload_alloc(ice
->state
.surface_uploader
, 0, isl_dev
->ss
.size
,
153 isl_dev
->ss
.align
, &state_ref
->offset
, &state_ref
->res
,
156 iris_bo_offset_from_base_address(iris_resource_bo(state_ref
->res
));
157 isl_buffer_fill_state(&screen
->isl_dev
, surf_map
,
158 .address
= grid_ref
->offset
+
159 iris_resource_bo(grid_ref
->res
)->gtt_offset
,
160 .size_B
= sizeof(grid
->grid
),
161 .format
= ISL_FORMAT_RAW
,
163 .mocs
= 4); // XXX: MOCS
165 ice
->state
.dirty
|= IRIS_DIRTY_BINDINGS_CS
;
169 iris_launch_grid(struct pipe_context
*ctx
, const struct pipe_grid_info
*grid
)
171 struct iris_context
*ice
= (struct iris_context
*) ctx
;
172 struct iris_batch
*batch
= &ice
->batches
[IRIS_BATCH_COMPUTE
];
174 if (ice
->state
.predicate
== IRIS_PREDICATE_STATE_DONT_RENDER
)
177 if (unlikely(INTEL_DEBUG
& DEBUG_REEMIT
))
178 ice
->state
.dirty
|= IRIS_ALL_DIRTY_FOR_COMPUTE
;
180 /* We can't do resolves on the compute engine, so awkwardly, we have to
181 * do them on the render batch...
183 for (gl_shader_stage stage
= 0; stage
< MESA_SHADER_STAGES
; stage
++) {
184 iris_predraw_resolve_inputs(ice
, &ice
->batches
[IRIS_BATCH_RENDER
],
185 &ice
->state
.shaders
[stage
], NULL
, false);
188 iris_batch_maybe_flush(batch
, 1500);
190 //if (dirty & IRIS_DIRTY_UNCOMPILED_CS)
191 iris_update_compiled_compute_shader(ice
);
193 iris_update_grid_size_resource(ice
, grid
);
195 iris_binder_reserve_compute(ice
);
196 ice
->vtbl
.update_surface_base_address(batch
, &ice
->state
.binder
);
198 if (ice
->state
.compute_predicate
) {
199 ice
->vtbl
.load_register_mem64(batch
, MI_PREDICATE_DATA
,
200 ice
->state
.compute_predicate
, 0);
201 ice
->state
.compute_predicate
= NULL
;
204 ice
->vtbl
.upload_compute_state(ice
, batch
, grid
);
206 ice
->state
.dirty
&= ~IRIS_ALL_DIRTY_FOR_COMPUTE
;
208 /* Note: since compute shaders can't access the framebuffer, there's
209 * no need to call iris_postdraw_update_resolve_tracking.