Updated formatting and comments. No modification to configuration values.

Michael Luciuk 2025-07-07 10:23:42 -04:00
parent c12ba88b78
commit c9e996bac8


@@ -1,20 +1,16 @@
 general:
   # Run mode. Options are 'prod' or 'dev'.
   run_mode: prod
 dataset:
-  #number of slices you want to split each recording into
-  num_slices: 8
+  # Seed for the random number generator, used for signal generation
+  seed: 42
-  #training/val split between the 2 data sets
-  train_split: 0.8
-  val_split : 0.2
-  # Number of samples per recording
-  recording_length: 1024
-  #used to initialize a random number generator.
-  seed: 25
-  #multiple modulations to contain in the dataset
-  modulation_types: [bpsk, qpsk, qam16, qam64]
+  # List of signal modulation schemes to include in the dataset
+  modulation_types:
+    - bpsk
+    - qpsk
+    - qam16
+    - qam64
   # Rolloff factor for pulse shaping filter (0 < beta <= 1)
   beta: 0.3
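A quick way to confirm that the restyled modulation_types list is format-only: flow-style and block-style YAML sequences parse to the same value. A minimal check, assuming PyYAML (the project's actual parser is not shown on this page):

import yaml

# Flow-style (old) and block-style (new) sequences load identically,
# so this part of the hunk changes formatting only, not values.
flow = yaml.safe_load("modulation_types: [bpsk, qpsk, qam16, qam64]")
block = yaml.safe_load(
    "modulation_types:\n"
    "  - bpsk\n"
    "  - qpsk\n"
    "  - qam16\n"
    "  - qam64\n"
)
assert flow == block == {"modulation_types": ["bpsk", "qpsk", "qam16", "qam64"]}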
@@ -23,20 +19,18 @@ dataset:
   sps: 4
   # SNR sweep range: start, stop (exclusive), and step (in dB)
-  snr_start: -6 # Start value of SNR sweep (in dB)
-  snr_stop: 13 # Stop value (exclusive) of SNR sweep (in dB)
-  snr_step: 3 # Step size for SNR sweep (in dB)
+  snr_start: -6
+  snr_stop: 13
+  snr_step: 3
-  # Number of iterations (samples) per modulation and SNR combination
+  # Number of iterations (signal recordings) per modulation and SNR combination
   num_iterations: 3
+  # Number of samples per generated recording
+  recording_length: 1024
-  # Settings for each modulation scheme
-  # Keys must match entries in `modulation_types`
-  # - `num_bits_per_symbol`: how many bits each symbol encodes (e.g., 1 for BPSK, 4 for 16-QAM)
-  # - `constellation_type`: type of modulation (e.g., "psk", "qam", "fsk", "ofdm")
+  # Modulation scheme settings; keys must match the `modulation_types` list above
+  # Each entry includes:
+  # - num_bits_per_symbol: bits encoded per symbol (e.g., 1 for BPSK, 4 for 16-QAM)
+  # - constellation_type: modulation category (e.g., "psk", "qam", "fsk", "ofdm")
+  # TODO: Combine entries for 'modulation_types' and 'modulation_settings'
   modulation_settings:
     bpsk:
       num_bits_per_symbol: 1
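The sweep comments above describe a half-open range, so snr_stop itself is never generated. A short sketch of the resulting grid and dataset size, using the values from this hunk (the enumeration code is illustrative, not from the repository):

# SNR sweep with an exclusive stop, mirroring the comment above.
snr_start, snr_stop, snr_step = -6, 13, 3
snr_values = list(range(snr_start, snr_stop, snr_step))
print(snr_values)  # [-6, -3, 0, 3, 6, 9, 12] -> 7 SNR points

# Recordings generated per (modulation, SNR) pair.
modulation_types = ["bpsk", "qpsk", "qam16", "qam64"]
num_iterations = 3
total_recordings = len(modulation_types) * len(snr_values) * num_iterations
print(total_recordings)  # 4 * 7 * 3 = 84 recordings, each 1024 samples long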
@@ -51,20 +45,25 @@ dataset:
       num_bits_per_symbol: 6
       constellation_type: qam
+  # Number of slices to cut from each recording
+  num_slices: 8
+  # Training and validation split ratios; must sum to 1
+  train_split: 0.8
+  val_split : 0.2
 training:
-  # Number of training samples processed together before the model updates its weights
+  # Number of training examples processed together before the model updates its weights
   batch_size: 256
   # Number of complete passes through the training dataset during training
   epochs: 5
-  # Learning rate: how much weights are updated after every batch
-  # Suggested range for fine-tuning: 1e-6 to 1e-4
+  # Learning rate: step size for weight updates after each batch
+  # Recommended range for fine-tuning: 1e-6 to 1e-4
   learning_rate: 1e-4
-  # Whether to use GPU acceleration for training (if available)
+  # Enable GPU acceleration for training if available
   use_gpu: true
   # Dropout rate for individual neurons/layers (probability of dropping out a unit)
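The relocated split ratios carry a "must sum to 1" constraint, and num_slices divides each 1024-sample recording evenly. A hypothetical sanity check (the variable names and checks are assumptions, not the repository's code):

num_slices = 8
recording_length = 1024
train_split, val_split = 0.8, 0.2

# The new comment says the ratios must sum to 1; compare with a tolerance
# because the values are floats.
assert abs(train_split + val_split - 1.0) < 1e-9, "splits must sum to 1"

samples_per_slice = recording_length // num_slices
print(samples_per_slice)  # 1024 / 8 = 128 samples per slice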
@@ -73,13 +72,12 @@ training:
   # Drop path rate: probability of dropping entire residual paths (stochastic depth)
   drop_path_rate: 0.2
-  # Weight decay (L2 regularization) to help prevent overfitting
+  # Weight decay (L2 regularization) coefficient to help prevent overfitting
   wd: 0.01
 app:
-  # Optimization style for ORT conversion. Options: 'Fixed', 'None'
-  optimization_style: Fixed
+  # Optimization style for ORT conversion; options: 'Fixed', 'None'
+  optimization_style: "Fixed"
-  # Target platform architecture. Common options: 'amd64', 'arm64'
-  target_platform: amd64
+  # Target platform architecture; common options: 'amd64', 'arm64'
+  target_platform: "amd64"
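For reference, a minimal sketch of consuming this file, assuming PyYAML and a config.yaml path (both are assumptions; the repository's loader does not appear on this page). One caveat: YAML 1.1 parsers such as PyYAML read 1e-4 as a string because the literal has no decimal point, so a loader may want an explicit cast.

import yaml

# Hypothetical loader; the filename is an assumption.
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["general"]["run_mode"])        # prod
print(cfg["app"]["optimization_style"])  # Fixed (quoted and bare scalars load as the same string)

# PyYAML resolves 1e-4 to the string '1e-4', so cast defensively.
learning_rate = float(cfg["training"]["learning_rate"])
print(learning_rate)  # 0.0001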