deleted old recordings, updated gpu for training

Liyu Xiao 2025-05-22 15:57:20 -04:00
parent 123cb82334
commit 44507493a3
6 changed files with 50 additions and 51 deletions

.gitignore

@@ -6,3 +6,4 @@ __pycache__/
 *.ckpt
 *.ipynb
 *.onnx
+*.npy


@@ -20,9 +20,7 @@ training:
   use_gpu: true
 inference:
   model_path: checkpoints/inference_recognition_model.ckpt
-  num_classes: 4
-  output_path: onnx_files/inference_recognition_model.onnx
 app:
   build_dir: dist
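With num_classes and output_path removed, only model_path remains under the inference section. A minimal sketch of reading that section (assuming the file is named config.yaml and PyYAML is available; the project may use its own config loader instead):

# Sketch only: read the trimmed inference section from the YAML config.
# Assumes the config file is named config.yaml and PyYAML is installed.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

model_path = cfg["inference"]["model_path"]  # the only remaining inference key
print("Inference checkpoint:", model_path)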


@@ -80,4 +80,6 @@ if __name__ == "__main__":
         ckpt_path=os.path.join(CHECKPOINTS_DIR, model_checkpoint), fp16=False
     )
-    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, model_checkpoint))
+    output_file = "inference_recognition_model.onnx"
+    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, output_file))


@@ -13,56 +13,56 @@ mods = {
def generate_modulated_signals():
    for modulation in ["bpsk", "qpsk", "qam16", "qam64"]:
        for snr in np.arange(-6, 13, 3):
            for i in range(100):
                recording_length = 1024
                beta = 0.3  # the rolloff factor, can be changed to add variety
                sps = 4  # samples per symbol, or the relative bandwidth of the digital signal; can also be changed

                # blocks don't directly take the string 'qpsk', so we use the dict 'mods' to get parameters
                constellation_type = mods[modulation]["constellation_type"]
                num_bits_per_symbol = mods[modulation]["num_bits_per_symbol"]

                # construct the digital modulation blocks with these parameters:
                # bit source -> mapper -> upsampling -> pulse shaping
                bit_source = block_generator.RandomBinarySource()
                mapper = block_generator.Mapper(
                    constellation_type=constellation_type,
                    num_bits_per_symbol=num_bits_per_symbol,
                )
                upsampler = block_generator.Upsampling(factor=sps)
                pulse_shaping_filter = block_generator.RaisedCosineFilter(
                    upsampling_factor=sps, beta=beta
                )

                pulse_shaping_filter.connect_input([upsampler])
                upsampler.connect_input([mapper])
                mapper.connect_input([bit_source])

                modulation_recording = pulse_shaping_filter.record(
                    num_samples=recording_length
                )

                # add noise by calculating the power of the modulation recording
                # and generating AWGN from the snr parameter
                signal_power = np.mean(np.abs(modulation_recording.data[0] ** 2))
                awgn_source = block_generator.AWGNSource(
                    variance=(signal_power / 2) * (10 ** (((-1 * snr) / 20)))
                )
                noise = awgn_source.record(num_samples=recording_length)
                samples_with_noise = modulation_recording.data + noise.data
                output_recording = Recording(data=samples_with_noise)

                # add metadata for ML later
                output_recording.add_to_metadata(key="modulation", value=modulation)
                output_recording.add_to_metadata(key="snr", value=int(snr))
                output_recording.add_to_metadata(key="beta", value=beta)
                output_recording.add_to_metadata(key="sps", value=sps)

                # view if you want
                # output_recording.view()

                # save to file
                output_recording.to_npy()  # optionally add path and filename parameters

if __name__ == "__main__":
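For a concrete sense of the noise scaling, the variance expression used above can be evaluated for a single SNR point (a standalone check; the unit signal power is an assumption for illustration only):

# Sketch only: evaluate the AWGN variance formula used above for one SNR
# value, assuming a normalized signal power of 1.0.
signal_power = 1.0
snr = 6  # dB
variance = (signal_power / 2) * (10 ** ((-1 * snr) / 20))
print(f"AWGN variance at {snr} dB: {variance:.4f}")  # ~0.2506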


@@ -31,7 +31,7 @@ def train_model():
     train_flag = True
     batch_size = 128
-    epochs = 1
+    epochs = 50
     checkpoint_dir = training_cfg.checkpoint_dir
     checkpoint_filename = training_cfg.checkpoint_filename
@@ -76,8 +76,8 @@ def train_model():
 hparams = {
     "drop_path_rate": 0.2,
     "drop_rate": 0.5,
-    "learning_rate": 3e-4,
-    "wd": 0.2,
+    "learning_rate": 1e-4,
+    "wd": 0.01,
 }

 class RFClassifier(L.LightningModule):
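The new learning_rate and wd values only take effect where the optimizer is built. A minimal sketch of how a LightningModule like RFClassifier might consume them (RFClassifier's actual body is not part of this diff, so the wiring and the placeholder model below are assumptions):

# Sketch only: wire the hparams dict into AdamW inside a LightningModule.
# RFClassifier's real implementation is not shown in this commit; the tiny
# linear model here is a placeholder.
import lightning as L
import torch
from torch import nn

hparams = {"drop_path_rate": 0.2, "drop_rate": 0.5, "learning_rate": 1e-4, "wd": 0.01}

class SketchClassifier(L.LightningModule):
    def __init__(self, hparams_dict):
        super().__init__()
        self.save_hyperparameters(hparams_dict)
        self.model = nn.Linear(1024, 4)  # placeholder backbone

    def configure_optimizers(self):
        return torch.optim.AdamW(
            self.parameters(),
            lr=self.hparams["learning_rate"],
            weight_decay=self.hparams["wd"],
        )

model = SketchClassifier(hparams)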


@@ -33,9 +33,7 @@ class TrainingConfig:
 @dataclass
 class InferenceConfig:
     model_path: str
-    num_classes: int
-    output_path: str

 @dataclass
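After this change, InferenceConfig carries only the checkpoint path. A sketch of the slimmed dataclass built from a plain dict (field name and value follow the diff and the YAML above; the loader itself is assumed, not the project's code):

# Sketch only: the InferenceConfig left after this commit.
from dataclasses import dataclass

@dataclass
class InferenceConfig:
    model_path: str

inference_cfg = InferenceConfig(
    model_path="checkpoints/inference_recognition_model.ckpt"
)
# num_classes and the ONNX output path now have to come from elsewhere,
# e.g. the hard-coded output_file in the conversion script shown above.
print(inference_cfg.model_path)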