# CI pipeline for the modulation recognition demo: generate recordings, build an
# HDF5 dataset, train the model, then package it for inference as ONNX and ORT files.
name: Modulation Recognition Demo

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  ria-demo:
    runs-on: ubuntu-latest-2080
    steps:
      - name: Print GPU information
        run: |
          if command -v nvidia-smi &> /dev/null; then
            echo "✅ NVIDIA GPU is available"
            nvidia-smi
          else
            echo "⚠️ No NVIDIA GPU found"
          fi

      - name: Checkout project code
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"

      # Install utils from the RIA Hub package index, falling back to the bundled
      # wheel if the index is unreachable, then install the remaining requirements.
      - name: Install dependencies (incl. RIA Hub utils)
        run: |
          set -e
          python -m pip install --upgrade pip
          echo "Trying to install utils from RIA Hub..."
          if ! pip install \
            --index-url "https://${{ secrets.RIAHUB_USER }}:${{ secrets.RIAHUB_TOKEN }}@git.riahub.ai/api/packages/qoherent/pypi/simple/" \
            utils; then
            echo "RIA Hub install failed, falling back to local wheel..."
            pip install ./wheels/utils-*.whl
          fi
          pip install -r requirements.txt

      - name: 1. Generate Recordings
        run: |
          mkdir -p data/recordings
          PYTHONPATH=. python scripts/dataset_manager/data_gen.py --output-dir data/recordings

      - name: 📦 Compress Recordings
        run: tar -czf recordings.tar.gz -C data/recordings .

      - name: ⬆️ Upload recordings
        uses: actions/upload-artifact@v4
        with:
          name: recordings
          path: recordings.tar.gz

      - name: 2. Build HDF5 Dataset
        run: |
          mkdir -p data/dataset
          PYTHONPATH=. python scripts/dataset_manager/produce_dataset.py
        shell: bash

      - name: ⬆️ Upload Dataset
        uses: actions/upload-artifact@v4
        with:
          name: dataset
          path: data/dataset/**

      - name: 3. Train Model
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
          mkdir -p checkpoint_files
          PYTHONPATH=. python scripts/model_builder/train.py 2>/dev/null

      - name: 4. Plot Model
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
          PYTHONPATH=. python scripts/model_builder/plot_data.py 2>/dev/null

      - name: ⬆️ Upload Checkpoints
        uses: actions/upload-artifact@v4
        with:
          name: checkpoints
          path: checkpoint_files/*

      - name: 5. Export model to ONNX graph
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
          mkdir -p onnx_files
          MKL_DISABLE_FAST_MM=1 PYTHONPATH=. python scripts/application_packager/convert_to_onnx.py 2>/dev/null

      - name: ⬆️ Upload ONNX file
        uses: actions/upload-artifact@v4
        with:
          name: onnx-file
          path: onnx_files/inference_recognition_model.onnx

      - name: 6. Profile ONNX model
        run: |
          PYTHONPATH=. python scripts/application_packager/profile_onnx.py

      - name: ⬆️ Upload JSON trace
        uses: actions/upload-artifact@v4
        with:
          name: profile-data
          path: "**/onnxruntime_profile_*.json"

      - name: 7. Convert ONNX graph to an ORT file
        run: |
          PYTHONPATH=. python scripts/application_packager/convert_to_ort.py

      - name: ⬆️ Upload ORT file
        uses: actions/upload-artifact@v4
        with:
          name: ort-file
          path: ort_files/inference_recognition_model.ort
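
# The artifacts uploaded above can be consumed by a downstream job. The
# commented-out sketch below is illustrative only: the job name, runner label,
# and listing step are assumptions, not part of this pipeline. It shows how a
# dependent job could fetch the packaged model with actions/download-artifact@v4.
#
#  smoke-test:
#    needs: ria-demo
#    runs-on: ubuntu-latest
#    steps:
#      - name: Download ORT file
#        uses: actions/download-artifact@v4
#        with:
#          name: ort-file
#          path: ort_files
#      - name: List downloaded model
#        run: ls -lh ort_files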