diff --git a/doc/config_files/config_simulation_plume.yaml b/doc/config_files/config_simulation_plume.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f091711a2b7fe7b4bf9a249d79356143669c137
--- /dev/null
+++ b/doc/config_files/config_simulation_plume.yaml
@@ -0,0 +1,74 @@
+####################
+# SIMULATION #
+####################
+simClass: Plume
+GPU: True
+sim_method: convnet # Choose between convnet and CG (CG is the reference solver)
+
+#Field saving options
+save_field: True
+save_field_x_ite: 10
+save_post_x_ite: 10
+
+#Plot options
+plot_field: True
+plot_x_ite: 50
+
+#Post-computations options
+post_computations: False
+
+out_dir: './output/dir/'
+
+####################
+# PHYSICAL FORCES #
+####################
+Richardson: 0.1
+gravity: 1.0
+gravity_x: 0
+gravity_y: 1
+
+####################
+# DISCRETIZATION #
+####################
+Nx: 128 #[] number of control volumes in x direction
+Ny: 128 #[] number of control volumes in y direction
+Nt: 1000 #[] number of time steps to simulate
+
+# CFL
+CFL: 0.2
+
+
+####################
+# AI SOLVER #
+####################
+ite_transition: 0
+network_params:
+ load_path: '/path/to/neurasim/trained_networks/lt_nograd_4_16/Unet_lt_nograd_4_16/'
+ model_name: 'Unet_lt_nograd_4_16'
+ new_train: 'new'
+
+####################
+# NORMALIZATION #
+####################
+normalization:
+ normalize: True
+ scale_factor: 10.0
+ debug_folder: './results/debug/'
+
+####################
+# GEOMETRY #
+####################
+#Domain
+Lx: 128
+Ly: 128
+
+#BC
+BC_domain_x: OPEN
+BC_domain_y: STICKY
+
+#Cylinder
+cylinder: False
+D: 10
+yD: 150
+input_rad: 0.145
+input_vel: 1.0
\ No newline at end of file
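For orientation, here is a minimal sketch of how a config such as the one above could be read and how its discretization entries relate to a CFL-limited time step. It is illustrative only: the loading code is not part of this change, and treating `input_vel` as the reference speed for the CFL estimate is an assumption (the solver computes its own time step).

```python
# Minimal sketch, not part of the repository: read the plume config with PyYAML
# and derive the grid spacing and a CFL-limited time-step estimate.
import yaml

with open("doc/config_files/config_simulation_plume.yaml") as f:
    cfg = yaml.safe_load(f)

dx = cfg["Lx"] / cfg["Nx"]                 # control-volume size in x
dy = cfg["Ly"] / cfg["Ny"]                 # control-volume size in y
u_ref = cfg["input_vel"]                   # assumption: inlet velocity as reference speed
dt_max = cfg["CFL"] * min(dx, dy) / u_ref  # classic CFL bound: dt <= CFL * dx / |u|

print(f"dx = {dx}, dy = {dy}, dt_max = {dt_max}")
```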
diff --git a/doc/config_files/config_simulation_plume_cyl.yaml b/doc/config_files/config_simulation_plume_cyl.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f10fe35f1a0033739663902613f7305e20811ed4
--- /dev/null
+++ b/doc/config_files/config_simulation_plume_cyl.yaml
@@ -0,0 +1,72 @@
+####################
+# SIMULATION #
+####################
+simClass: Plume
+GPU: True
+sim_method: convnet # Choose between convnet and CG (CG is the reference solver)
+
+#Field saving options
+save_field: True
+save_field_x_ite: 10
+save_post_x_ite: 10
+
+#Plot options
+plot_field: True
+plot_x_ite: 50
+
+#Post-computations options
+post_computations: False
+
+out_dir: './output/dir/'
+
+####################
+# PHYSICAL FORCES #
+####################
+Richardson: 0.1
+gravity: 1.0
+gravity_x: 0
+gravity_y: 1
+
+####################
+# DISCRETIZATION #
+####################
+Nx: 128 #[] number of control volumes in x direction
+Ny: 128 #[] number of control volumes in y direction
+Nt: 1000 #[] number of time steps to simulate
+# CFL
+CFL: 0.2
+
+####################
+# AI SOLVER #
+####################
+ite_transition: 0
+network_params:
+ load_path: '/path/to/neurasim/trained_networks/lt_nograd_4_16/Unet_lt_nograd_4_16/'
+ model_name: 'Unet_lt_nograd_4_16'
+ new_train: 'new' # Option to read networks trained with older versions, not to be modified in this scope
+
+####################
+# NORMALIZATION #
+####################
+normalization:
+ normalize: True
+ scale_factor: 10.0
+ debug_folder: './results/debug/'
+
+####################
+# GEOMETRY #
+####################
+#Domain
+Lx: 128
+Ly: 128
+
+#BC
+BC_domain_x: OPEN
+BC_domain_y: STICKY
+
+#Cylinder
+cylinder: True
+D: 20
+yD: 80
+input_rad: 0.145
+input_vel: 1.0
\ No newline at end of file
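The file above differs from `config_simulation_plume.yaml` only in the cylinder block. As a purely illustrative sketch, and assuming `D` is the cylinder diameter, `yD` its position along y, and the cylinder is centred in x (none of which is defined by the config itself), an obstacle mask on the `Nx` by `Ny` grid could be built as follows:

```python
# Illustrative only: boolean obstacle mask for a cylinder of diameter D centred
# at (Lx / 2, yD). How the solver actually consumes these geometry keys is an
# assumption, not something defined by the config file.
import numpy as np

Nx, Ny, Lx, Ly = 128, 128, 128, 128   # values from the config above
D, yD = 20, 80

x = (np.arange(Nx) + 0.5) * Lx / Nx   # cell-centre coordinates in x
y = (np.arange(Ny) + 0.5) * Ly / Ny   # cell-centre coordinates in y
X, Y = np.meshgrid(x, y, indexing="ij")

obstacle = (X - Lx / 2) ** 2 + (Y - yD) ** 2 <= (D / 2) ** 2
print(obstacle.sum(), "cells flagged as solid")
```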
diff --git a/doc/config_files/config_simulation_vk.yaml b/doc/config_files/config_simulation_vk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22b2c33b230a03491820caf057d7403ddb0ccf64
--- /dev/null
+++ b/doc/config_files/config_simulation_vk.yaml
@@ -0,0 +1,70 @@
+
+####################
+# SIMULATION #
+####################
+simClass: VonKarman_rotative
+GPU: True
+sim_method: convnet # Choose between convnet and CG (CG is the reference solver)
+
+#Field saving options
+save_field: True
+save_field_x_ite: 50
+save_post_x_ite: 50
+
+#Plot options
+plot_field: True
+plot_x_ite: 50
+
+#Post-computations options
+post_computations: True
+
+out_dir: './output/dir/'
+
+####################
+# PHYSICAL FORCES #
+####################
+Reynolds: 100.0
+Alpha: 0.0 # Dimensionless rotation parameter
+
+####################
+# DISCRETIZATION #
+####################
+Nx: 896 #[] number of control volumes in x direction
+Ny: 608 #[] number of control volumes in y direction
+Nt: 10000 #[] number of time steps to simulate
+
+# CFL
+CFL: 0.2
+
+
+####################
+# AI SOLVER #
+####################
+ite_transition: 0
+network_params:
+ load_path: '/path/to/neurasim/trained_networks/lt_nograd_4_16/Unet_lt_nograd_4_16/'
+ model_name: 'Unet_lt_nograd_4_16'
+ new_train: 'new' # Option to read networks trained with older versions, not to be modified in this scope
+
+####################
+# NORMALIZATION #
+####################
+normalization:
+ normalize: True
+ scale_factor: 10.0
+ debug_folder: './results/debug/'
+
+####################
+# GEOMETRY #
+####################
+#Domain
+Lx: 300
+Ly: 200
+
+#BC
+BC_domain_x: OPEN
+BC_domain_y: STICKY
+
+#Cylinder
+D: 10
+xD: 100
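A quick sanity check on the discretization above, using nothing but the values in the file (simple arithmetic, no solver assumptions):

```python
# Resolution check for the von Karman config: cell size and number of cells
# across the cylinder diameter, computed from the entries above.
Lx, Ly, Nx, Ny, D = 300, 200, 896, 608, 10

dx, dy = Lx / Nx, Ly / Ny
print(f"dx = {dx:.3f}, dy = {dy:.3f}")                        # ~0.335 and ~0.329
print(f"cells across D: {D / dx:.1f} (x), {D / dy:.1f} (y)")  # ~29.9 and ~30.4
```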
diff --git a/doc/config_files/config_train.yaml b/doc/config_files/config_train.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b3c31c6410244067de7c46de6c1cf4417fee26ac
--- /dev/null
+++ b/doc/config_files/config_train.yaml
@@ -0,0 +1,200 @@
+# Configuration file with default parameters.
+# Some can be modified through the command line. See the training script's help
+# and README.md for more info.
+# This configuration is saved to disk (as a PyTorch object) every epoch
+# so that training runs can be paused and restarted.
+#=========================================
+# MODEL
+#=========================================
+
+#=========================================
+# DATA
+#=========================================
+# dataDir : Dataset location
+dataDir: "/absolute/path/to/data/datasets/"
+# dataset : Dataset name. Folder inside dataDir with training and testing scenes
+dataset: "dataset_name"
+# numWorkers : number of parallel worker processes for the dataloader. Set to 0
+# to load data in the main process.
+numWorkers: 3
+# If true, the dataset is preprocessed and the program exits.
+# Preprocessing is automatic if no previous preprocessing is detected for the current dataset.
+preprocOriginalFluidNetDataOnly: false
+# shuffleTraining : Shuffles dataset
+shuffleTraining: true
+
+
+#=========================================
+# OUTPUT
+#=========================================
+# modelDir : Output folder for trained model and loss log.
+modelDir: "/absolute/path/to/save/your/model/modelname"
+# modelFilename : Trained model name
+modelFilename: "convModel"
+
+#=========================================
+# TRAINING MONITORING
+#=========================================
+
+# freqToFile : epoch frequency for writing the loss log to file and saving figures.
+freqToFile: 25
+# printTraining : Debug options for training.
+# Prints or shows samples from the validation dataset and compares the
+# network output to the ground truth.
+# Options: save (save figures), show (display in a window), none
+printTraining: "save"
+
+#=========================================
+# TRAINING PARAMETER
+#=========================================
+batchSize: 64
+# maxEpochs : Maximum number of epochs
+maxEpochs: 1000
+# resumeTraining : resume training from a checkpoint.
+resumeTraining: false
+modelParam:
+ # model : options ('FluidNet', 'ScaleNet')
+ # -FluidNet : uses the architecture found in lib/model.py (based on FluidNet)
+ # -ScaleNet : uses a multiscale architecture found in lib/multi_scale_net.py
+ model: "ScaleNet"
+
+ # inputChannels : Network inputs. At least one of them must be set to true!
+ inputChannels:
+ div: true
+ pDiv: false
+ UDiv: false
+ # lr : learning rate. When using scientific notation, the type must be given
+ # explicitly for the yaml->python cast.
+ lr: !!python/float 5e-5
+ # *Lambda : weighting for each loss term. Set to 0 to disable that term.
+ # MSE of pressure
+ pL2Lambda: 0
+ # MSE of divergence (Ground truth is zero divergence)
+ divL2Lambda: 1
+ # Absolute difference of pressure
+ pL1Lambda: 0
+ # Absolute difference of divergence
+ divL1Lambda: 0
+ # MSE of long term divergence
+ # If > 0, implements the Long Term divergence concept from FluidNet
+ divLongTermLambda: 5
+ # ltGrad : if true, use the differentiable long-term loss; otherwise ordinary long-term training (data augmentation)
+ ltGrad: false
+ # longTermDivNumSteps : number of simulation steps after which the divergence is
+ # measured for each training and test sample. Leave the list empty to disable
+ # (or set divLongTermLambda to 0).
+ longTermDivNumSteps:
+ - 2
+ - 4
+ # longTermDivProbability is the probability that longTermDivNumSteps[0]
+ # will be taken, otherwise longTermDivNumSteps[1] will be taken with
+ # probability of 1 - longTermDivProbability.
+ longTermDivProbability: 0.9
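+ # Example: with steps [2, 4] and longTermDivProbability = 0.9, about 90% of the
+ # long-term samples are unrolled for 2 steps and the remaining ~10% for 4 steps.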
+ # normalizeInput : if true, normalizes input by max(std(chan), threshold)
+ normalizeInput: true
+ # normalizeInputChan : channel used to compute the std
+ normalizeInputChan: "UDiv"
+ # normalizeInputThreshold : lower bound on the std, so near-zero (noisy) inputs are not amplified
+ normalizeInputThreshold: 0.00001
+ # normalizing scale factor
+ scale_factor: 10
+ # Dictionary for normalization
+ normalization:
+ normalize: True
+ scale_factor: 10.0
+ debug_folder: "/absolute/path/for/debugging"
+
+ #=========================================
+ # PHYSICAL PARAMETERS
+ #=========================================
+ # Time step: default simulation timestep.
+ dt: 0.1
+ # Resolution of domain (it must match the data coming from the data loader!)
+ nnx: 128
+ nny: 128
+ # ONLY APPLIED IF LONG TERM DIV IS ACTIVATED
+ # ----------------------------------
+ # buoyancyScale : Buoyancy forces scale
+ # gravityScale : Gravity forces scale
+ # Note: Manta and FluidNet split the gravity force into "gravity" and "buoyancy".
+ # They represent the two terms arising from the Boussinesq approximation
+ # rho*g = rho_0*g + delta_rho*g
+ # (1) (2)
+ # rho_0 being the average density and delta_rho the local density difference
+ # w.r.t. the average density.
+ # Mantaflow calls (1) gravity and (2) buoyancy and allows for different g's
+ buoyancyScale: 0
+ gravityScale: 0
+ # gravityVec : direction of the gravity vector
+ gravityVec:
+ x: 0
+ y: 0
+ z: 0
+ # training buoyancy scale : This is the buoyancy to use when adding buoyancy
+ # to the long term training. It will be applied in a random cardinal direction.
+ trainBuoyancyScale: 0. #2.0
+ # training buoyancy probability : probability of adding buoyancy during
+ # long-term training.
+ trainBuoyancyProb: 0. #0.3
+ # training gravity scale : This is the gravity to use when adding gravity
+ # to the long term training. It will be applied in a random cardinal direction.
+ trainGravityScale: 2.0
+ # training gravity probability : probability of adding gravity during
+ # long-term training.
+ trainGravityProb: 0.3
+ # ------------------------------------
+ # Introduces a correcting factor in the density equation
+ # from "A splitting method for incompressible flows with variable
+ # density based on a pressure Poisson equation" (Guermond, Salgado).
+ # Not thoroughly tested; the recommendation is to leave it as false.
+ correctScalar: false
+ # operatingDensity : When applying buoyancy, buoyancyScale is multiplied
+ # by (density(i,j) - operatingDensity)
+ operatingDensity: 0.0
+ # viscosity : introduces a viscous term in the momentum equation.
+ # Algorithm taken from the book "Fluid Simulation for Computer Graphics" by
+ # Bridson
+ viscosity: 0
+ # timeScaleSigma : amplitude of the time-scale perturbation during training.
+ timeScaleSigma: 1
+ # maccormackStrength : used in semi-Lagrangian MacCormack advection
+ # when the long-term divergence loss is activated. 0.6 is a good value;
+ # values close to 1 can lead to high-frequency artifacts.
+ maccormackStrength: 0.6
+ # sampleOutsideFluid : if true, allows particles in advection to 'land' inside
+ # obstacles. In general, we don't want that, so leave it as false to avoid
+ # possible artifacts.
+ sampleOutsideFluid: false
+
+ #=========================================
+ # SIMULATION PARAMETERS
+ #=========================================
+ sim_phi:
+ # GPU utilization
+ GPU: True
+ # Domain Discretization
+ Nx: 128 #[] number of control volumes in x direction
+ Ny: 128 #[] number of control volumes in y direction
+ Nt: 1 #[] number of time steps to simulate
+ #Domain
+ Lx: 128 #[m] or in mm if changed consistently
+ Ly: 128 #[m]
+ # CFL
+ CFL: 0.2
+ # time
+ dt: 1
+ # Choose between the network and CG; keep it set to convnet for training
+ sim_method: 'convnet' # CG or convnet
+ # Network normalization and debugging
+ normalization:
+ normalize: True
+ scale_factor: 10.0
+ debug_folder: "/absolute/path/for/debugging"
+ # Network to load for long-term simulations (it must match the network being trained)
+ network_params:
+ load_path: "/absolute/path/where/model/is/saved"
+ model_name: 'modelname'
+ new_train: 'new' # Legacy option to be deleted
+ # For debugging purposes
+ in_dir: './'
+ out_dir: './'
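To make the role of the `*Lambda` weights in `modelParam` concrete, here is a minimal sketch of how the individual loss terms could be combined. The tensor names and the exact form of the long-term term are assumptions for illustration; the actual implementation lives in the training code, not in this config.

```python
# Minimal sketch with assumed tensor names, not the repository's implementation:
# combining the loss terms with the *Lambda weights from modelParam.
import torch
import torch.nn.functional as F

def total_loss(p_pred, p_gt, div_pred, div_lt_pred, cfg):
    # Ground-truth divergence is zero, so the divergence terms compare against zeros.
    zeros = torch.zeros_like(div_pred)
    return (
        cfg["pL2Lambda"] * F.mse_loss(p_pred, p_gt)
        + cfg["divL2Lambda"] * F.mse_loss(div_pred, zeros)
        + cfg["pL1Lambda"] * F.l1_loss(p_pred, p_gt)
        + cfg["divL1Lambda"] * F.l1_loss(div_pred, zeros)
        + cfg["divLongTermLambda"] * F.mse_loss(div_lt_pred, torch.zeros_like(div_lt_pred))
    )
```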