added app.yaml configs

Liyu Xiao 2025-06-16 13:43:59 -04:00
parent f967009dbf
commit 06bd4d4001
9 changed files with 134 additions and 46 deletions

View File

@@ -82,13 +82,13 @@ jobs:
- name: 4. Convert to ONNX file
run: |
mkdir -p onnx_files
MKL_DISABLE_FAST_MM=1 PYTHONPATH=. python onnx_scripts/convert_to_onnx.py
MKL_DISABLE_FAST_MM=1 PYTHONPATH=. python scripts/onnx/convert_to_onnx.py
echo "building inference app"
- name: Upload ONNX file
uses: actions/upload-artifact@v3
with:
name: ria-demo-onnx
name: onnx-file
path: onnx_files/inference_recognition_model.onnx
- name: List checkpoint directory
@@ -97,7 +97,7 @@ jobs:
- name: 5. Profile ONNX model
run: |
PYTHONPATH=. python onnx_scripts/profile_onnx.py
PYTHONPATH=. python scripts/onnx/profile_onnx.py
- name: Upload JSON profiling data
uses: actions/upload-artifact@v3
@@ -107,16 +107,12 @@ jobs:
- name: 6. Convert to ORT file
run: |
python -m onnxruntime.tools.convert_onnx_models_to_ort \
/workspace/qoherent/modrec-workflow/onnx_files/inference_recognition_model.onnx \
--output_dir ort_files \
--optimization_style Fixed \
--target_platform amd64
PYTHONPATH=. python scripts/ort/convert_to_ort.py
- name: Upload ORT file
uses: actions/upload-artifact@v3
with:
name: ria-demo-ort
name: ort-file
path: ort_files/inference_recognition_model.ort
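Before the ORT conversion in step 6, a quick structural check on the exported ONNX file can catch a broken export early. A minimal sketch, not part of this commit; the path comes from step 4 above:

```python
import onnx

# Load and validate the model produced by step 4 above.
model = onnx.load("onnx_files/inference_recognition_model.onnx")
onnx.checker.check_model(model)  # raises onnx.checker.ValidationError if malformed

# Print the graph's declared input shapes; dynamic axes show up as names.
for tensor in model.graph.input:
    dims = [d.dim_value or d.dim_param for d in tensor.type.tensor_type.shape.dim]
    print(tensor.name, dims)
```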

View File

@@ -16,25 +16,52 @@ dataset:
# Modulation types to include in the dataset
modulation_types: [bpsk, qpsk, qam16, qam64]
# Rolloff factor for pulse shaping filter (0 < beta <= 1)
beta: 0.3
# Samples per symbol (determines bandwidth of the digital signal)
sps: 4
# SNR sweep range: start, stop (exclusive), and step (in dB)
snr_start: -6 # Start value of SNR sweep (in dB)
snr_stop: 13 # Stop value (exclusive) of SNR sweep (in dB)
snr_step: 3 # Step size for SNR sweep (in dB)
# Number of iterations (samples) per modulation and SNR combination
num_iterations: 3
# Number of samples per generated recording
recording_length: 1024
training:
#number of training samples being processed together before model updates its weights
# Number of training samples processed together before the model updates its weights
batch_size: 256
#number of passes through the data set during the training process
# Number of complete passes through the training dataset during training
epochs: 5
#how much the weights update during training after every batch
#suggested range for fine-tuning: (1e-6, 1e-4)
# Learning rate: how much weights are updated after every batch
# Suggested range for fine-tuning: 1e-6 to 1e-4
learning_rate: 1e-4
# Whether to use GPU acceleration for training (if available)
use_gpu: true
inference:
# Number of classes to classify
num_classes: 4
# Dropout rate for individual neurons/layers (probability of dropping out a unit)
drop_rate: 0.5
# Drop path rate: probability of dropping entire residual paths (stochastic depth)
drop_path_rate: 0.2
# Weight decay (L2 regularization) to help prevent overfitting
wd: 0.01
app:
build_dir: dist
# Optimization style for ORT conversion. Options: 'Fixed', 'None'
optimization_style: Fixed
# Target platform architecture. Common options: 'amd64', 'arm64'
target_platform: amd64
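The dataset block above fully determines the dataset size: np.arange(-6, 13, 3) yields seven SNR points (-6 through 12 dB), so 4 modulations x 7 SNRs x 3 iterations = 84 recordings of 1024 samples each. A sketch that derives this from the YAML (the conf/app.yaml path is an assumption; the diff does not show where the file lives):

```python
import numpy as np
import yaml

with open("conf/app.yaml") as f:  # path is an assumption, not shown in the diff
    ds = yaml.safe_load(f)["dataset"]

# Half-open sweep, matching the np.arange semantics in the generation script.
snrs = np.arange(ds["snr_start"], ds["snr_stop"], ds["snr_step"])
total = len(ds["modulation_types"]) * len(snrs) * ds["num_iterations"]
print(f"{len(snrs)} SNR points -> {total} recordings of {ds['recording_length']} samples")
```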

View File

@@ -17,6 +17,13 @@ class DataSetConfig:
seed: int
modulation_types: list
val_split: float
beta: float
sps: int
snr_start: int
snr_stop: int
snr_step: int
num_iterations: int
recording_length: int
@dataclass
@@ -25,16 +32,16 @@ class TrainingConfig:
epochs: int
learning_rate: float
use_gpu: bool
@dataclass
class InferenceConfig:
num_classes: int
drop_rate: float
drop_path_rate: float
wd: float
@dataclass
class AppConfig:
build_dir: str
optimization_style: str
target_platform: str
class AppSettings:
@@ -49,7 +56,6 @@ class AppSettings:
self.general = GeneralConfig(**config_data["general"])
self.dataset = DataSetConfig(**config_data["dataset"])
self.training = TrainingConfig(**config_data["training"])
self.inference = InferenceConfig(**config_data["inference"])
self.app = AppConfig(**config_data["app"])
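Since each section is hydrated with ** unpacking, the YAML keys must match the dataclass field names exactly (which is why snr_stop has to agree between app.yaml and DataSetConfig). A minimal self-contained sketch of the pattern, again assuming app.yaml lives at conf/app.yaml:

```python
from dataclasses import dataclass
import yaml

@dataclass
class AppConfig:
    build_dir: str
    optimization_style: str
    target_platform: str

def load_app_config(path: str = "conf/app.yaml") -> AppConfig:
    with open(path) as f:
        config_data = yaml.safe_load(f)
    # A misspelled or missing key raises TypeError here, at startup,
    # instead of surfacing as an AttributeError mid-pipeline.
    return AppConfig(**config_data["app"])
```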

View File

@@ -1,12 +1,46 @@
h5py
pytorch-lightning
matplotlib
numpy
PyYAML
scikit-learn
timm
torch
onnx
onnxruntime
aiohappyeyeballs~=2.6.1
aiohttp~=3.12.12
aiosignal~=1.3.2
attrs~=25.3.0
filelock~=3.18.0
frozenlist~=1.7.0
fsspec~=2025.5.1
h5py~=3.14.0
idna~=3.10
Jinja2~=3.1.6
lightning~=2.5.1.post0
lightning-utilities~=0.14.3
MarkupSafe~=3.0.2
mpmath~=1.3.0
multidict~=6.4.4
networkx~=3.5
numpy~=2.3.0
nvidia-cublas-cu12~=12.6.4.1
nvidia-cuda-cupti-cu12~=12.6.80
nvidia-cuda-nvrtc-cu12~=12.6.77
nvidia-cuda-runtime-cu12~=12.6.77
nvidia-cudnn-cu12~=9.5.1.17
nvidia-cufft-cu12~=11.3.0.4
nvidia-cufile-cu12~=1.11.1.6
nvidia-curand-cu12~=10.3.7.77
nvidia-cusolver-cu12~=11.7.1.2
nvidia-cusparse-cu12~=12.5.4.2
nvidia-cusparselt-cu12~=0.6.3
nvidia-nccl-cu12~=2.26.2
nvidia-nvjitlink-cu12~=12.6.85
nvidia-nvtx-cu12~=12.6.77
packaging~=24.2
propcache~=0.3.2
pytorch-lightning~=2.5.1.post0
PyYAML~=6.0.2
setuptools~=80.9.0
sympy~=1.14.0
torch~=2.7.1
torchmetrics~=1.7.3
tqdm~=4.67.1
triton~=3.3.1
typing_extensions~=4.14.0
utils~=1.0.2
yarl~=1.20.1
--index-url https://git.riahub.ai/api/packages/qoherent/pypi/simple/
utils
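Note that the expanded, pinned list no longer includes onnx or onnxruntime, which the workflow's ONNX steps still import; worth double-checking before merging. A stdlib-only sketch to verify an environment against the pins:

```python
from importlib.metadata import version, PackageNotFoundError

# Packages the CI steps above rely on; adjust the list to taste.
for pkg in ("torch", "pytorch-lightning", "numpy", "onnx", "onnxruntime"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} NOT INSTALLED")
```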

View File

@@ -3,6 +3,7 @@ import numpy as np
from utils.signal import block_generator
import argparse
import os
from helpers.app_settings import get_app_settings
mods = {
"bpsk": {"num_bits_per_symbol": 1, "constellation_type": "psk"},
@@ -13,12 +13,13 @@ mods = {
def generate_modulated_signals(output_dir):
for modulation in ["bpsk", "qpsk", "qam16", "qam64"]:
for snr in np.arange(-6, 13, 3):
settings = get_app_settings().dataset
for modulation in settings.modulation_types:
for snr in np.arange(settings.snr_start, settings.snr_stop, settings.snr_step):
for i in range(settings.num_iterations):
recording_length = 1024
beta = 0.3 # the rolloff factor, can be changed to add variety
sps = 4 # samples per symbol, or the relative bandwidth of the digital signal. Can also be changed.
recording_length = settings.recording_length
beta = settings.beta # pulse-shaping rolloff factor
sps = settings.sps # samples per symbol (relative bandwidth of the digital signal)
# blocks don't directly take the string 'qpsk' so we use the dict 'mods' to get parameters
constellation_type = mods[modulation]["constellation_type"]
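For context on what one recording holds with these settings: 1024 samples at 4 samples per symbol is 256 symbols, and the (1 + beta) RRC excess bandwidth puts the occupied band at roughly 32% of the sample rate. A rough bookkeeping sketch, assuming the usual root-raised-cosine interpretation of beta and sps:

```python
recording_length = 1024  # samples per recording (dataset.recording_length)
sps = 4                  # samples per symbol (dataset.sps)
beta = 0.3               # RRC rolloff factor (dataset.beta)

num_symbols = recording_length // sps  # 256 symbols per recording
occupied_bw = (1 + beta) / sps         # ~0.325 of the sample rate

# Bits per symbol per modulation, mirroring the 'mods' table above.
bits = {"bpsk": 1, "qpsk": 2, "qam16": 4, "qam64": 6}
for mod, bps in bits.items():
    print(f"{mod}: {num_symbols * bps} bits per recording")
```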

View File

@@ -17,14 +17,13 @@ def convert_to_onnx(ckpt_path, fp16=False):
output_path (str): The path to save the converted ONNX model.
"""
settings = get_app_settings()
inference_cfg = settings.inference
dataset_cfg = settings.dataset
in_channels = 2
batch_size = 1
slice_length = int(1024 / dataset_cfg.num_slices)
num_classes = inference_cfg.num_classes
num_classes = len(dataset_cfg.modulation_types)
model = RFClassifier(
model=mobilenetv3(
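The hunk is truncated above, but the export it leads into presumably boils down to a torch.onnx.export call with a dummy IQ input of shape (batch_size, in_channels, slice_length). A sketch under those assumptions; num_slices, the stand-in model, and the opset are all guesses:

```python
import torch

num_slices = 8                                 # assumption: dataset.num_slices
slice_length = 1024 // num_slices              # 128 samples per slice
dummy_input = torch.randn(1, 2, slice_length)  # (batch, IQ channels, samples)

model = torch.nn.Identity()  # stand-in for the RFClassifier built above
torch.onnx.export(
    model,
    dummy_input,
    "onnx_files/inference_recognition_model.onnx",
    input_names=["iq_frame"],
    output_names=["logits"],
    opset_version=17,
)
```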

View File

@@ -0,0 +1,24 @@
import subprocess
from helpers.app_settings import get_app_settings
settings = get_app_settings()
input_path = f"{settings.app.build_dir}/inference_recognition_model.onnx"
optimization_style = settings.app.optimization_style
target_platform = settings.app.target_platform
# Build the command
command = [
"python",
"-m",
"onnxruntime.tools.convert_onnx_models_to_ort",
input_path,
"--output_dir", "ort_files",
"--optimization_style", optimization_style,
"--target_platform", target_platform
]
# Run it
subprocess.run(command, check=True)
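One nicety of the ORT format is that the converted file loads through the same InferenceSession API, so the script's output can be smoke-tested directly. A sketch; the shape fallback assumes any dynamic axis resolves to 1:

```python
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("ort_files/inference_recognition_model.ort")
inp = session.get_inputs()[0]
shape = [d if isinstance(d, int) else 1 for d in inp.shape]  # resolve dynamic dims
logits = session.run(None, {inp.name: np.random.randn(*shape).astype(np.float32)})[0]
print("predicted class index:", int(np.argmax(logits)))
```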

View File

@@ -60,10 +60,10 @@ def train_model():
num_classes = len(ds_train.label_encoder.classes_)
hparams = {
"drop_path_rate": 0.2,
"drop_rate": 0.5,
"drop_path_rate": training_cfg.drop_path_rate,
"drop_rate": training_cfg.drop_rate,
"learning_rate": float(training_cfg.learning_rate),
"wd": 0.01,
"wd": training_cfg.wd,
}
class RFClassifier(L.LightningModule):
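Of the four hparams, learning_rate and wd typically land in the optimizer, while drop_rate and drop_path_rate are consumed when the timm backbone is constructed. A sketch of the optimizer side, assuming AdamW (the actual optimizer choice is outside this hunk):

```python
import lightning as L
import torch

class RFClassifierSketch(L.LightningModule):
    """Illustrative only; the real RFClassifier is defined in train.py."""

    def __init__(self, hparams: dict):
        super().__init__()
        self.save_hyperparameters(hparams)

    def configure_optimizers(self):
        return torch.optim.AdamW(
            self.parameters(),
            lr=self.hparams["learning_rate"],  # 1e-4 from app.yaml
            weight_decay=self.hparams["wd"],   # 0.01 from app.yaml
        )
```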