#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"
#include "brw_debug.h"
-/* This is used to initialize brw->state.atoms[]. We could use this
- * list directly except for a single atom, brw_constant_buffer, which
- * has a .dirty value which changes according to the parameters of the
- * current fragment and vertex programs, and so cannot be a static
- * value.
- */
const struct brw_tracked_state *atoms[] =
{
- &brw_check_fallback,
-
-// &brw_wm_input_sizes,
+/* &brw_wm_input_sizes, */
&brw_vs_prog,
&brw_gs_prog,
&brw_clip_prog,
&brw_cc_unit,
&brw_vs_surfaces, /* must do before unit */
- &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
+ /*&brw_wm_constant_surface,*/ /* must do before wm surfaces/bind bo */
&brw_wm_surfaces, /* must do before samplers and unit */
&brw_wm_samplers,
&brw_blend_constant_color,
&brw_depthbuffer,
-
&brw_polygon_stipple,
- &brw_polygon_stipple_offset,
-
&brw_line_stipple,
- &brw_aa_line_parameters,
&brw_psp_urb_cbs,
&brw_index_buffer,
&brw_vertices,
- &brw_constant_buffer
+ &brw_curbe_buffer
};
/* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      /* Release and clear each validated-bo slot (replaces the older
       * explicit unreference + NULL-out pair).
       */
      bo_reference(&brw->state.validated_bos[i], NULL);
   }
   brw->state.validated_bo_count = 0;
}
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);

   /* The batchbuffer itself must always be in the validated set. */
   brw_add_validated_bo(brw, brw->batch->buf);

   /* Debug aid: force every atom to re-emit by marking all state dirty. */
   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   /* Sanity-check the bound fragment shader's sampler usage against the
    * currently bound sampler state.  If this fails, we can experience
    * GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         /* TGSI file_max is the highest register index used, hence '<'
          * against the sampler count.
          */
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers);
         /*assert(fp->info.texture_max <= brw->curr.num_textures);*/
      }
   }
   /* NOTE(review): the remainder of this function is not visible in this
    * chunk; i and ret are presumably used below the cut.
    */
enum pipe_error brw_upload_state(struct brw_context *brw)
{
struct brw_state_flags *state = &brw->state.dirty;
+ int ret;
int i;
- static int dirty_count = 0;
brw_clear_validated_bos(brw);
- if (INTEL_DEBUG) {
+ if (BRW_DEBUG) {
/* Debug version which enforces various sanity checks on the
* state flags which are generated and checked to help ensure
* state atoms are ordered correctly in the list.
*/
struct brw_state_flags examined, prev;
- _mesa_memset(&examined, 0, sizeof(examined));
+ memset(&examined, 0, sizeof(examined));
prev = *state;
for (i = 0; i < Elements(atoms); i++) {
}
}
- if (INTEL_DEBUG & DEBUG_STATE) {
- brw_update_dirty_count(mesa_bits, state->mesa);
- brw_update_dirty_count(brw_bits, state->brw);
- brw_update_dirty_count(cache_bits, state->cache);
- if (dirty_count++ % 1000 == 0) {
- brw_print_dirty_count(mesa_bits, state->mesa);
- brw_print_dirty_count(brw_bits, state->brw);
- brw_print_dirty_count(cache_bits, state->cache);
- debug_printf("\n");
- }
+ if (BRW_DEBUG & DEBUG_STATE) {
+ brw_update_dirty_counts( state->mesa,
+ state->brw,
+ state->cache );
}
/* Clear dirty flags:
*/
memset(state, 0, sizeof(*state));
+ return 0;
}