}
 
 /**
- * Updates the state of the noop feature.
+ * Updates the state of the noop feature.  Returns true if the batch
+ * transitioned out of noop mode, in which case the caller must invalidate
+ * (flag dirty) all of its state.
  */
-uint64_t
-iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dirty_flags)
+bool
+iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
 {
    if (batch->noop_enabled == noop_enable)
-      return 0;
+      return false;
    /* We only need to update the entire state if we transition from noop ->
     * not-noop.
     */
-   return !batch->noop_enabled ? dirty_flags : 0;
+   return !batch->noop_enabled;
 }
 
 
 bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);
 
-uint64_t iris_batch_prepare_noop(struct iris_batch *batch,
-                                 bool noop_enable,
-                                 uint64_t dirty_flags);
+bool iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable);
 
 #define RELOC_WRITE EXEC_OBJECT_WRITE
 
 
 {
    struct iris_context *ice = (struct iris_context *) ctx;
 
-   ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER],
-                                               enable,
-                                               IRIS_ALL_DIRTY_FOR_RENDER);
-   ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE],
-                                               enable,
-                                               IRIS_ALL_DIRTY_FOR_COMPUTE);
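+   /* When a batch leaves noop mode, all of its state must be re-emitted. */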
+   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable))
+      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
+
+   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable))
+      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
 }
 
 void