@@ -511,7 +511,7 @@ def move_unets(unet_training_mask):
             if next_task == 'eval':
                 if exists(evaluate_config):
                     accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
-                    evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
+                    evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.model_dump(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
                     if is_master:
                         tracker.log(evaluation, step=step())
                 next_task = 'sample'
@@ -548,7 +548,7 @@ def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_
     accelerator.wait_for_everyone()  # If nodes arrive at this point at different times they might try to autoresume the current run which makes no sense and will cause errors
     tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
     tracker.save_config(config_path, config_name='decoder_config.json')
-    tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
+    tracker.add_save_metadata(state_dict_key='config', metadata=config.model_dump())
     return tracker
 
 def initialize_training(config: TrainDecoderConfig, config_path):
@@ -585,7 +585,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         val_prop = config.data.splits.val,
         test_prop = config.data.splits.test,
         n_sample_images = config.train.n_sample_images,
-        **config.data.dict(),
+        **config.data.model_dump(),
         rank = rank,
         seed = config.seed,
     )
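
These hunks track the pydantic v1 to v2 API migration: BaseModel.dict() is replaced by its v2 equivalent BaseModel.model_dump(), which returns the same plain dict of model fields and can be unpacked into a call the same way. A minimal sketch of the renamed call, assuming pydantic >= 2 and using a hypothetical ExampleConfig in place of the real config models:

    # Minimal sketch, assumes pydantic >= 2 is installed.
    # ExampleConfig is a hypothetical stand-in for the config models above.
    from pydantic import BaseModel

    class ExampleConfig(BaseModel):
        n_sample_images: int = 8
        cond_scale: float = 1.0

    config = ExampleConfig()

    # v2: model_dump() returns a plain dict of the model's fields,
    # so it can be unpacked with ** exactly like .dict() was in v1.
    print(config.model_dump())   # {'n_sample_images': 8, 'cond_scale': 1.0}

    # .dict() still exists in v2 but emits a deprecation warning.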