Structure for cli implemented
This commit is contained in:
parent
7c1313a210
commit
2429d62067
0
ria_toolkit_oss_cli/__init__.py
Normal file
0
ria_toolkit_oss_cli/__init__.py
Normal file
20
ria_toolkit_oss_cli/cli.py
Normal file
20
ria_toolkit_oss_cli/cli.py
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
"""
|
||||||
|
This module contains the main group for the utils CLI.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils_cli.utils import commands
|
||||||
|
|
||||||
|
|
||||||
|
# Root command group for the utils CLI.
# NOTE(review): `verbose` is accepted globally but not used yet — presumably
# reserved for future logging configuration; confirm before removing.
@click.group()
@click.option("-v", "--verbose", is_flag=True, type=bool, help="Increase verbosity, especially useful for debugging.")
def cli(verbose):
    # No group-level work; subcommands are attached dynamically below.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
# Attach every click.Command exposed by the project's commands module to the
# CLI group, keyed by its attribute name.
for _attr_name in dir(commands):
    _candidate = getattr(commands, _attr_name)
    if not isinstance(_candidate, click.Command):
        continue
    cli.add_command(_candidate, name=_attr_name)
|
||||||
803
ria_toolkit_oss_cli/ria_toolkit_oss/annotate.py
Normal file
803
ria_toolkit_oss_cli/ria_toolkit_oss/annotate.py
Normal file
|
|
@ -0,0 +1,803 @@
|
||||||
|
"""Annotate command - Automatic detection and manual annotation management."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from ria_toolkit_oss.annotations import (
|
||||||
|
annotate_with_cusum,
|
||||||
|
detect_signals_energy,
|
||||||
|
split_recording_annotations,
|
||||||
|
threshold_qualifier,
|
||||||
|
)
|
||||||
|
from ria_toolkit_oss.datatypes import Annotation
|
||||||
|
from ria_toolkit_oss.datatypes.recording import Recording
|
||||||
|
from ria_toolkit_oss.io import load_recording, to_blue, to_npy, to_sigmf, to_wav
|
||||||
|
from ria_toolkit_oss.common import format_frequency, format_sample_count
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_sigmf_path(filepath):
    """Return *filepath* as a Path with any SigMF extension removed.

    Handles .sigmf-data, .sigmf-meta, and .sigmf; any other path is
    returned unchanged (as a Path) so callers can treat it uniformly.
    """
    path = Path(filepath)
    # A suffix containing ".sigmf" marks one half of a SigMF pair; strip it
    # so both file names can be rebuilt from the shared base.
    return path.with_suffix("") if ".sigmf" in path.suffix else path
|
||||||
|
|
||||||
|
|
||||||
|
def detect_input_format(filepath):
    """Infer the recording format from *filepath*'s extension.

    Returns one of "sigmf", "npy", "wav", "blue".

    Raises:
        click.ClickException: when the extension is not recognized.
    """
    path = Path(filepath)
    ext = path.suffix.lower()

    # Either half of a SigMF pair, or a bare .sigmf name (case-sensitive,
    # matching the original endswith check on path.name).
    if ext in (".sigmf-data", ".sigmf-meta") or path.name.endswith(".sigmf"):
        return "sigmf"

    simple = {".npy": "npy", ".wav": "wav", ".blue": "blue"}
    fmt = simple.get(ext)
    if fmt is None:
        raise click.ClickException(f"Unknown format for '{filepath}'. Supported: .sigmf, .npy, .wav, .blue")
    return fmt
|
||||||
|
|
||||||
|
|
||||||
|
def determine_output_path(input_path, output_path, fmt, quiet, overwrite):
    """Resolve where the annotated recording should be written.

    Args:
        input_path (Path): Path the recording was loaded from.
        output_path: User-supplied output path, or None to auto-derive.
        fmt (str): Format string from detect_input_format ("sigmf", "npy", ...).
        quiet (bool): Suppress progress messages when True.
        overwrite (bool): For non-SigMF formats, write back to input_path
            instead of creating a "_annotated" sibling copy.

    Returns:
        Path: For SigMF, the base path without extension; otherwise the full
        output file path.
    """
    if fmt == "sigmf":
        # SigMF output is addressed by base name: use the explicit output
        # path when given, otherwise derive it from the input path.
        if output_path:
            resolved = normalize_sigmf_path(output_path)
        else:
            resolved = normalize_sigmf_path(input_path)

        # BUG FIX: the message was previously emitted before the path was
        # resolved, printing "None.sigmf-meta" when no output was supplied.
        if not quiet:
            click.echo(f"Saving annotations to: {resolved}.sigmf-meta")
        return resolved

    if output_path:
        resolved = Path(output_path)
    elif overwrite:
        resolved = input_path
    else:
        # Other formats: keep the original and write an _annotated sibling.
        resolved = input_path.with_name(input_path.stem + "_annotated" + input_path.suffix)

    # BUG FIX: likewise, echo the resolved path rather than the raw argument
    # (which printed "Saving to: None" when auto-derived).
    if not quiet:
        click.echo(f"Saving to: {resolved}")
    return resolved
|
||||||
|
|
||||||
|
|
||||||
|
def save_recording_auto(recording, output_path, input_path, quiet=False, overwrite=False):
    """Save recording, auto-detecting format from extension.

    For SigMF: Only overwrites metadata file, data file is unchanged
    For other formats: Creates _annotated copy by default, unless overwrite=True
    """
    input_path = Path(input_path)
    fmt = detect_input_format(input_path)

    # Determine output path
    output_path = determine_output_path(
        input_path=input_path, output_path=output_path, fmt=fmt, quiet=quiet, overwrite=overwrite
    )

    if fmt == "sigmf":
        # Normalize path for SigMF
        base_path = output_path
        stem = base_path.name
        parent = base_path.parent

        # For SigMF: only save metadata, copy data if needed
        meta_path = parent / f"{stem}.sigmf-meta"
        data_path = parent / f"{stem}.sigmf-data"

        # If output is different from input, copy data file
        input_base = normalize_sigmf_path(input_path)
        if input_base != base_path:
            import shutil

            # Construct input data path correctly
            # input_base is like /path/to/recording or /path/to/recording.sigmf
            # We need /path/to/recording.sigmf-data
            # NOTE(review): str.replace substitutes the FIRST ".sigmf" anywhere
            # in the path, so a directory component containing ".sigmf" would
            # be mangled — confirm inputs never look like that.
            if str(input_base).endswith(".sigmf"):
                input_data = Path(str(input_base).replace(".sigmf", ".sigmf-data"))
            else:
                input_data = input_base.parent / f"{input_base.name}.sigmf-data"
            if not quiet:
                click.echo(f" Copying: {data_path}")
            # Data file is copied as-is; only the metadata is rewritten below.
            shutil.copy2(input_data, data_path)

        # Always save metadata (this is the whole point)
        to_sigmf(recording, filename=stem, path=parent, overwrite=True)

        if not quiet:
            click.echo(f" Updated: {meta_path}")
            if input_base != base_path:
                click.echo(f" Created: {data_path}")

    elif fmt == "npy":
        to_npy(recording, filename=output_path.stem, path=output_path.parent, overwrite=True)
        if not quiet:
            click.echo(f" Created: {output_path}")
    elif fmt == "wav":
        to_wav(recording, filename=output_path.stem, path=output_path.parent, overwrite=True)
        if not quiet:
            click.echo(f" Created: {output_path}")
    elif fmt == "blue":
        to_blue(recording, filename=output_path.stem, path=output_path.parent, overwrite=True)
        if not quiet:
            click.echo(f" Created: {output_path}")
|
||||||
|
|
||||||
|
|
||||||
|
def determine_frequency_bounds(recording: Recording, freq_lower, freq_upper):
    """Resolve the frequency bounds for a new annotation.

    Returns (freq_lower, freq_upper, freq_default) where freq_default is True
    when the bounds were derived from the recording's full bandwidth rather
    than supplied by the caller.

    Raises:
        click.ClickException: if only one bound is given, or the range is inverted.
    """
    # Handle frequency bounds
    if (freq_lower is None) != (freq_upper is None):
        raise click.ClickException("Must specify both --freq-lower and --freq-upper, or neither")

    if freq_lower is None:
        # Default to full bandwidth
        # NOTE(review): falls back to sample_rate=1 / center_frequency=0 when
        # metadata keys are missing — confirm these defaults are intentional.
        sample_rate = recording.metadata.get("sample_rate", 1)
        center_freq = recording.metadata.get("center_frequency", 0)
        freq_lower = center_freq - (sample_rate / 2)
        freq_upper = center_freq + (sample_rate / 2)
        freq_default = True
    else:
        freq_default = False
        # Only user-supplied bounds can be inverted; the derived defaults
        # always satisfy lower < upper for a positive sample rate.
        if freq_lower >= freq_upper:
            raise click.ClickException(
                f"Invalid frequency range: lower ({format_frequency(freq_lower)}) "
                f"must be < upper ({format_frequency(freq_upper)})"
            )

    return freq_lower, freq_upper, freq_default
|
||||||
|
|
||||||
|
|
||||||
|
def get_indices_list(indices, recording: Recording):
    """Parse a comma-separated index string into a validated list of ints.

    Returns None when *indices* is falsy, which callers treat as
    "all annotations".

    Raises:
        click.ClickException: on malformed input or out-of-range indices.
    """
    if indices:
        try:
            indices_list = [int(idx.strip()) for idx in indices.split(",")]
            # Validate indices
            for idx in indices_list:
                if idx < 0 or idx >= len(recording.annotations):
                    # ClickException is not a ValueError, so this propagates
                    # straight past the except clause below.
                    raise click.ClickException(
                        f"Invalid index {idx}. Recording has {len(recording.annotations)} annotation(s)"
                    )
        except ValueError as e:
            # Raised by int() when a token is not an integer.
            raise click.ClickException(f"Invalid indices format. Expected comma-separated integers: {e}")

        return indices_list
    else:
        return None
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Main command group
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
# Parent group for all annotation subcommands; they attach themselves below
# via @annotate.command(). The docstring doubles as the CLI help text.
@click.group()
def annotate():
    """Manage and auto-detect annotations on RF recordings.

    \b
    Subcommands:
        list      - List annotations
        add       - Add manual annotation
        remove    - Remove annotation by index
        clear     - Clear all annotations
        energy    - Auto-detect using energy method
        cusum     - Auto-detect using CUSUM method
        threshold - Auto-detect using threshold method
        separate  - Split annotations by frequency components (Phase 2)

    \b
    File Path Handling:
        - SigMF files: Pass .sigmf-data, .sigmf-meta, or base name
        - Other formats: .npy, .wav, .blue files

    \b
    Output Behavior:
        - SigMF: Updates .sigmf-meta only (data unchanged), in-place
        - Other: Creates _annotated copy unless --overwrite specified
    """
    # Group body intentionally empty; all work happens in subcommands.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# List subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
# Renamed from `list` to avoid shadowing the builtin for the whole module;
# the user-facing command name is pinned with name="list".
@annotate.command(name="list")
@click.argument("input", type=click.Path(exists=True))
@click.option("--verbose", is_flag=True, help="Show detailed annotation info")
def list_annotations(input, verbose):
    """List all annotations in a recording.

    \b
    Examples:
        utils annotate list recording.sigmf-data
        utils annotate list signal.npy --verbose
    """
    try:
        recording = load_recording(input)
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}") from e

    if len(recording.annotations) == 0:
        click.echo(f"No annotations in {Path(input).name}")
        return

    click.echo(f"\nAnnotations in {Path(input).name}:")
    for i, ann in enumerate(recording.annotations):
        # Structured info ("type", "user_comment") is stored as JSON in the
        # annotation's comment field; fall back to treating it as plain text.
        try:
            comment_data = json.loads(ann.comment)
            ann_type = comment_data.get("type", "unknown")
            user_comment = comment_data.get("user_comment", "")
        except (json.JSONDecodeError, TypeError):
            ann_type = "unknown"
            user_comment = ann.comment or ""

        # Basic info
        click.echo(
            f" [{i}] Samples {format_sample_count(ann.sample_start)}-"
            f"{format_sample_count(ann.sample_start + ann.sample_count)}: {ann.label}"
        )
        click.echo(f" Type: {ann_type}")

        if verbose:
            if user_comment:
                click.echo(f" Comment: {user_comment}")
            # Only format the range when it will actually be shown.
            freq_range = f"{format_frequency(ann.freq_lower_edge)} - {format_frequency(ann.freq_upper_edge)}"
            click.echo(f" Frequency: {freq_range}")
            if ann.detail:
                click.echo(f" Detail: {ann.detail}")

    click.echo(f"\nTotal: {len(recording.annotations)} annotation(s)")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Add subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command(context_settings={"max_content_width": 200})
@click.argument("input", type=click.Path(exists=True))
@click.option("--start", type=int, required=True, help="Start sample index")
@click.option("--count", type=int, required=True, help="Sample count")
@click.option("--label", type=str, required=True, help="Annotation label")
@click.option("--freq-lower", type=float, help="Lower frequency edge (Hz)")
@click.option("--freq-upper", type=float, help="Upper frequency edge (Hz)")
@click.option("--comment", type=str, help="Human-readable comment")
@click.option(
    "--type",
    "annotation_type",
    type=click.Choice(["standalone", "parallel", "intersection"]),
    default="standalone",
    help="Annotation type",
)
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def add(input, start, count, label, freq_lower, freq_upper, comment, annotation_type, output, overwrite, quiet):
    """Add a manual annotation.

    \b
    Examples:
        utils annotate add file.npy --start 1000 --count 500 --label wifi
        utils annotate add signal.sigmf-data --start 0 --count 1000 --label burst --comment "Strong signal"
    """
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    # Validate sample range
    # NOTE(review): assumes recording.data is indexable and channel 0 carries
    # the sample count — confirm for multi-channel recordings.
    n_samples = len(recording.data[0])
    if start < 0:
        raise click.ClickException(f"--start must be >= 0, got {start}")
    if count <= 0:
        raise click.ClickException(f"--count must be > 0, got {count}")
    if start + count > n_samples:
        raise click.ClickException(
            f"Invalid annotation range:\n"
            f" Start: {start:,}\n"
            f" Count: {count:,}\n"
            f" End: {start + count:,}\n"
            f"Recording only has {n_samples:,} samples"
        )

    # Handle frequency bounds
    freq_lower, freq_upper, freq_default = determine_frequency_bounds(
        recording=recording, freq_lower=freq_lower, freq_upper=freq_upper
    )

    # Build comment JSON
    # The structured type (and optional user comment) is serialized into the
    # annotation's comment field; `list` parses it back out.
    comment_data = {"type": annotation_type}
    if comment:
        comment_data["user_comment"] = comment

    # Create annotation
    ann = Annotation(
        sample_start=start,
        sample_count=count,
        freq_lower_edge=freq_lower,
        freq_upper_edge=freq_upper,
        label=label,
        comment=json.dumps(comment_data),
        detail={},
    )

    # NOTE(review): appends to the private _annotations list directly —
    # presumably Recording exposes no public mutator; confirm.
    recording._annotations.append(ann)

    if not quiet:
        click.echo("\nAdding annotation:")
        click.echo(f" Start: {format_sample_count(start)}")
        click.echo(f" Count: {format_sample_count(count)} samples")
        freq_str = (
            "full bandwidth" if freq_default else f"{format_frequency(freq_lower)} - {format_frequency(freq_upper)}"
        )
        click.echo(f" Frequency: {freq_str}")
        click.echo(f" Label: {label}")
        click.echo(f" Type: {annotation_type}")
        if comment:
            click.echo(f" Comment: {comment}")

    try:
        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"Failed to save: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Remove subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command(context_settings={"max_content_width": 200})
@click.argument("input", type=click.Path(exists=True))
@click.argument("index", type=int)
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def remove(input, index, output, overwrite, quiet):
    """Remove annotation by index.

    Use 'utils annotate list' to see annotation indices.

    \b
    Examples:
        utils annotate remove signal.sigmf-data 2
        utils annotate remove file.npy 0
    """
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    # Bounds-check before mutating anything.
    if index < 0 or index >= len(recording.annotations):
        raise click.ClickException(
            f"Cannot remove annotation at index {index}\n"
            f"Recording has {len(recording.annotations)} annotation(s) (indices 0-{len(recording.annotations)-1})"
        )

    # Keep a reference so the removal can still be reported after popping.
    removed_ann = recording.annotations[index]
    recording._annotations.pop(index)

    if not quiet:
        click.echo(f"\nRemoving annotation [{index}]:")
        click.echo(
            f" Removed: samples {format_sample_count(removed_ann.sample_start)}-"
            f"{format_sample_count(removed_ann.sample_start + removed_ann.sample_count)} ({removed_ann.label})"
        )

    try:
        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"Failed to save: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Clear subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command(context_settings={"max_content_width": 175})
@click.argument("input", type=click.Path(exists=True))
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--force", is_flag=True, help="Skip confirmation")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def clear(input, output, overwrite, force, quiet):
    """Clear all annotations.

    \b
    Examples:
        utils annotate clear signal.sigmf-data
        utils annotate clear file.npy --force
    """
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    count_before = len(recording.annotations)

    # Nothing to do — exit without touching the file.
    if count_before == 0:
        if not quiet:
            click.echo("No annotations to clear")
        return

    # Confirm unless --force
    # NOTE(review): --quiet also skips the confirmation prompt, so
    # "clear --quiet" deletes without asking — confirm that is intended.
    if not force and not quiet:
        click.echo(f"\nWarning: This will remove all {count_before} annotation(s)")
        click.confirm("Continue?", abort=True)

    recording._annotations = []

    if not quiet:
        click.echo(f"\nCleared {count_before} annotation(s)")

    try:
        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"Failed to save: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Energy detection subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command(context_settings={"max_content_width": 200})
@click.argument("input", type=click.Path(exists=True))
@click.option("--label", type=str, default="signal", help="Annotation label")
@click.option("--threshold", type=float, default=1.2, help="Threshold multiplier above noise floor")
@click.option("--segments", type=int, default=10, help="Number of segments for noise estimation")
@click.option("--window-size", type=int, default=200, help="Smoothing window size")
@click.option("--min-distance", type=int, default=5000, help="Min distance between detections")
@click.option(
    "--freq-method",
    type=click.Choice(["nbw", "obw", "full-detected", "full-bandwidth"]),
    default="nbw",
    help="Frequency bounding method",
)
@click.option("--nfft", type=int, default=65536, help="FFT size for frequency calculation")
@click.option("--obw-power", type=float, default=0.9999, help="Power percentage for OBW/NBW (0.99-0.9999)")
@click.option(
    "--type",
    "annotation_type",
    type=click.Choice(["standalone", "parallel", "intersection"]),
    default="standalone",
    help="Annotation type",
)
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def energy(
    input,
    label,
    threshold,
    segments,
    window_size,
    min_distance,
    freq_method,
    nfft,
    obw_power,
    annotation_type,
    output,
    overwrite,
    quiet,
):
    """Auto-detect signals using energy-based method.

    Detects bursts based on energy above noise floor. Best for bursty signals
    and intermittent transmissions.

    \b
    Frequency Bounding Methods:
        nbw - Nominal bandwidth (default, best for real signals)
        obw - Occupied bandwidth (more conservative, includes sidelobes)
        full-detected - Lowest to highest spectral component
        full-bandwidth - Entire Nyquist span

    \b
    Examples:
        utils annotate energy capture.sigmf-data --label burst
        utils annotate energy signal.npy --threshold 1.5 --min-distance 10000
        utils annotate energy signal.sigmf-data --freq-method obw
        utils annotate energy signal.sigmf-data --freq-method full-detected

    """
    # Load up front so failures surface before any detection work.
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    if not quiet:
        click.echo("\nDetecting signals using energy-based method...")
        click.echo(" Time detection:")
        click.echo(f" Segments: {segments}")
        click.echo(f" Threshold: {threshold}x noise floor")
        click.echo(f" Window size: {window_size} samples")
        click.echo(f" Min distance: {min_distance} samples")
        click.echo(f" Frequency bounds: {freq_method}")

    try:
        # Compare annotation counts before/after to report what was added;
        # the detector returns a (possibly new) Recording object.
        initial_count = len(recording.annotations)
        recording = detect_signals_energy(
            recording,
            k=segments,
            threshold_factor=threshold,
            window_size=window_size,
            min_distance=min_distance,
            label=label,
            annotation_type=annotation_type,
            freq_method=freq_method,
            nfft=nfft,
            obw_power=obw_power,
        )
        added = len(recording.annotations) - initial_count

        if not quiet:
            click.echo(f" ✓ Added {added} annotation(s)")

        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        # NOTE(review): this broad handler also converts save failures into
        # "Energy detection failed" — confirm the message is acceptable.
        raise click.ClickException(f"Energy detection failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# CUSUM detection subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("--label", type=str, default="segment", help="Annotation label")
@click.option("--min-duration", type=float, default=5.0, help="Min duration in ms (prevents over-segmentation)")
@click.option("--window-size", type=int, default=1, help="Smoothing window size")
@click.option("--tolerance", type=int, default=-1, help="Sample tolerance for merging")
@click.option(
    "--type",
    "annotation_type",
    type=click.Choice(["standalone", "parallel", "intersection"]),
    default="standalone",
    help="Annotation type",
)
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def cusum(input, label, min_duration, window_size, tolerance, annotation_type, output, overwrite, quiet):
    """Auto-detect segments using CUSUM method.

    Detects signal state changes (on/off, amplitude transitions). Best for
    segmenting continuous signals.

    IMPORTANT: Always specify --min-duration to prevent excessive segmentation.

    \b
    Examples:
        utils annotate cusum signal.sigmf-data --min-duration 5.0
        utils annotate cusum data.npy --min-duration 10.0 --label state
    """
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    if not quiet:
        click.echo("\nDetecting segments using CUSUM...")
        click.echo(f" Min duration: {min_duration} ms")
        # Only mention smoothing when it differs from the default of 1.
        if window_size != 1:
            click.echo(f" Window size: {window_size} samples")

    try:
        # Compare annotation counts before/after to report what was added.
        initial_count = len(recording.annotations)
        recording = annotate_with_cusum(
            recording,
            label=label,
            window_size=window_size,
            min_duration=min_duration,
            tolerance=tolerance,
            annotation_type=annotation_type,
        )
        added = len(recording.annotations) - initial_count

        if not quiet:
            click.echo(f" ✓ Added {added} annotation(s)")

        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"CUSUM detection failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Threshold detection subcommand
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("--threshold", type=float, required=True, help="Threshold (0.0-1.0, fraction of max magnitude)")
@click.option("--label", type=str, default="signal", help="Annotation label")
@click.option("--window-size", type=int, default=1024, help="Smoothing window size")
@click.option(
    "--type",
    "annotation_type",
    type=click.Choice(["standalone", "parallel", "intersection"]),
    default="standalone",
    help="Annotation type",
)
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
def threshold(input, threshold, label, window_size, annotation_type, output, overwrite, quiet):
    """Auto-detect signals using threshold method.

    Detects samples above a percentage of maximum magnitude. Best for simple
    power-based detection.

    \b
    Examples:
        utils annotate threshold signal.sigmf-data --threshold 0.7 --label wifi
        utils annotate threshold data.npy --threshold 0.5 --window-size 2048
    """
    # Validate the threshold before doing any file I/O.
    if not (0.0 <= threshold <= 1.0):
        raise click.ClickException(f"--threshold must be between 0.0 and 1.0, got {threshold}")

    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    if not quiet:
        click.echo("\nDetecting signals using threshold qualifier...")
        click.echo(f" Threshold: {threshold * 100:.1f}% of max magnitude")
        click.echo(f" Window size: {window_size} samples")

    try:
        # Compare annotation counts before/after to report what was added.
        initial_count = len(recording.annotations)
        recording = threshold_qualifier(
            recording,
            threshold=threshold,
            window_size=window_size,
            label=label,
            annotation_type=annotation_type,
        )
        added = len(recording.annotations) - initial_count

        if not quiet:
            click.echo(f" ✓ Added {added} annotation(s)")

        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"Threshold detection failed: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Separate subcommand (Phase 2: Parallel signal separation)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@annotate.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("--indices", type=str, help="Comma-separated annotation indices to split (default: all)")
@click.option("--nfft", type=int, default=65536, help="FFT size for spectral analysis")
@click.option("--noise-threshold-db", type=float, help="Noise floor threshold in dB (auto-estimated if not specified)")
@click.option("--min-component-bw", type=float, default=50e3, help="Min component bandwidth in Hz")
@click.option("--output", "-o", type=click.Path(), help="Output file path")
@click.option("--overwrite", is_flag=True, help="Overwrite input file (non-SigMF only)")
@click.option("--quiet", is_flag=True, help="Quiet mode")
@click.option("--verbose", is_flag=True, help="Verbose output (show detected components)")
def separate(input, indices, nfft, noise_threshold_db, min_component_bw, output, overwrite, quiet, verbose):
    """Split annotations by frequency components (Phase 2).

    Detects multiple frequency components within single annotations and splits
    them into separate annotations. Uses spectral peak detection with dual
    bandwidth estimation.

    \b
    Key Features:
    - Spectral peak detection for frequency components
    - Auto noise floor estimation (or user-specified)
    - Dual bandwidth estimation: -3dB primary, cumulative power fallback
    - Handles narrowband and wide signals (OFDM)

    \b
    Examples:
    utils annotate separate capture.sigmf-data
    utils annotate separate signal.npy --indices 0,1,2
    utils annotate separate data.sigmf-data --noise-threshold-db -70
    utils annotate separate signal.npy --min-component-bw 100000

    """
    # Load the recording; any loader failure is surfaced as a CLI error.
    try:
        recording = load_recording(input)
        if not quiet:
            click.echo(f"Loaded: {input}")
    except Exception as e:
        raise click.ClickException(f"Failed to load recording: {e}")

    # Parse indices if specified
    indices_list = get_indices_list(indices=indices, recording=recording)

    # Nothing to do when the recording carries no annotations.
    if len(recording.annotations) == 0:
        if not quiet:
            click.echo("No annotations to split")
        return

    # Echo the effective parameters before running the split.
    if not quiet:
        click.echo("\nSplitting annotations by frequency components...")
        click.echo(f" Input annotations: {len(recording.annotations)}")
        if indices_list:
            click.echo(f" Splitting indices: {indices_list}")
        click.echo(f" FFT size: {nfft}")
        if noise_threshold_db is not None:
            click.echo(f" Noise threshold: {noise_threshold_db} dB")
        else:
            click.echo(" Noise threshold: auto-estimated")
        click.echo(f" Min component BW: {format_frequency(min_component_bw)}")

    try:
        # Annotation counts before/after let us report the net change.
        initial_count = len(recording.annotations)

        recording = split_recording_annotations(
            recording,
            indices=indices_list,
            nfft=nfft,
            noise_threshold_db=noise_threshold_db,
            min_component_bw=min_component_bw,
        )

        final_count = len(recording.annotations)
        added = final_count - initial_count

        if not quiet:
            click.echo(f" ✓ Output annotations: {final_count} ({'+' if added >= 0 else ''}{added} change)")
            # In verbose mode, list each newly created annotation's sample
            # span and frequency range (new entries are appended at the end).
            if verbose and added > 0:
                click.echo("\n Details:")
                for i in range(initial_count, final_count):
                    ann = recording.annotations[i]
                    freq_range = f"{format_frequency(ann.freq_lower_edge)} - {format_frequency(ann.freq_upper_edge)}"
                    click.echo(
                        f" [{i}] samples {format_sample_count(ann.sample_start)}-"
                        f"{format_sample_count(ann.sample_start + ann.sample_count)}: {freq_range}"
                    )

        # Persist to --output, or back to the input path (format-aware helper).
        save_recording_auto(recording, output, input, quiet, overwrite)
        if not quiet:
            click.echo(" ✓ Saved")
    except Exception as e:
        raise click.ClickException(f"Spectral separation failed: {e}")
|
||||||
413
ria_toolkit_oss_cli/ria_toolkit_oss/capture.py
Normal file
413
ria_toolkit_oss_cli/ria_toolkit_oss/capture.py
Normal file
|
|
@ -0,0 +1,413 @@
|
||||||
|
"""Capture command for SDR devices."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils.io import to_blue, to_npy, to_sigmf, to_wav
|
||||||
|
from utils.io.recording import generate_filename
|
||||||
|
from utils.view.view_signal_simple import view_simple_sig
|
||||||
|
|
||||||
|
from .common import (
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_frequency,
|
||||||
|
format_sample_rate,
|
||||||
|
get_sdr_device,
|
||||||
|
load_yaml_config,
|
||||||
|
parse_frequency,
|
||||||
|
parse_metadata_args,
|
||||||
|
)
|
||||||
|
from .config import load_user_config
|
||||||
|
from .discover import (
|
||||||
|
find_bladerf_devices,
|
||||||
|
find_hackrf_devices,
|
||||||
|
find_pluto_devices,
|
||||||
|
find_rtlsdr_devices,
|
||||||
|
find_thinkrf_devices,
|
||||||
|
find_uhd_devices,
|
||||||
|
load_sdr_drivers,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def list_all_devices():
    """Discover every connected SDR device across all supported drivers.

    Returns:
        list: Device-description dicts from all vendor discovery helpers.
    """
    # Drivers must be loaded before any of the finders can see hardware.
    load_sdr_drivers(verbose=False)

    finders = (
        find_uhd_devices,
        find_pluto_devices,
        find_hackrf_devices,
        find_bladerf_devices,
        find_rtlsdr_devices,
        find_thinkrf_devices,
    )

    devices = []
    for finder in finders:
        devices.extend(finder())
    return devices
|
||||||
|
|
||||||
|
|
||||||
|
def auto_select_device(quiet: bool = False) -> str:
    """Auto-select device if only one is connected.

    Args:
        quiet: Suppress warning messages

    Returns:
        Device type string

    Raises:
        click.ClickException: If no devices or multiple devices found
    """
    detected = list_all_devices()

    # Guard: nothing connected at all.
    if not detected:
        raise click.ClickException("No SDR devices found.\n" "Run 'utils discover' to see available devices.")

    # Guard: ambiguous — the user must disambiguate with --device.
    if len(detected) > 1:
        listing = "\n".join(f" - {d.get('type', 'Unknown')}" for d in detected)
        raise click.ClickException(
            f"Multiple devices found. Specify with --device\n\n"
            f"Available devices:\n{listing}\n\n"
            f"Run 'utils discover' for more details."
        )

    only = detected[0]
    raw_type = only.get("type", "Unknown").lower().replace("-", "").replace(" ", "")

    # Normalize vendor/model spellings to the CLI's internal device names.
    aliases = {
        "plutosdr": "pluto",
        "hackrf": "hackrf",
        "hackrfone": "hackrf",
        "bladerf": "bladerf",
        "usrp": "usrp",
        "b200": "usrp",
        "b210": "usrp",
        "rtlsdr": "rtlsdr",
        "thinkrf": "thinkrf",
    }
    resolved = aliases.get(raw_type, raw_type)

    if not quiet:
        click.echo(
            click.style("Warning: ", fg="yellow")
            + f"No device specified. Auto-detected {only.get('type', 'Unknown')}",
            err=True,
        )
        click.echo(f"Use --device {resolved} to suppress this warning.\n", err=True)

    return resolved
|
||||||
|
|
||||||
|
|
||||||
|
def get_metadata_dict(config, metadata):
    """Build the capture metadata dict from config file, user config, and CLI.

    Precedence (lowest to highest): user-config defaults < config-file
    ``metadata`` section < ``--metadata KEY=VALUE`` CLI arguments.

    Args:
        config: Parsed YAML config dict (may contain a "metadata" mapping).
        metadata: Iterable of "KEY=VALUE" strings from the CLI.

    Returns:
        dict: Merged metadata dictionary.
    """
    # BUG FIX: copy the config's metadata mapping instead of mutating it in
    # place — the original updated config["metadata"] as a side effect.
    metadata_dict = dict(config.get("metadata", {}))

    # Load user config and apply defaults
    user_config = load_user_config()

    if user_config:
        # Standard identity fields; never override config-file values.
        for key in ["author", "organization", "project", "location", "testbed"]:
            if key in user_config and key not in metadata_dict:
                metadata_dict[key] = user_config[key]

        # SigMF-specific fields from the user config.
        if "sigmf" in user_config:
            sigmf = user_config["sigmf"]
            for key in ["license", "hw", "dataset"]:
                if key in sigmf and key not in metadata_dict:
                    metadata_dict[key] = sigmf[key]

    # CLI metadata overrides everything
    if metadata:
        metadata_dict.update(parse_metadata_args(metadata))

    return metadata_dict
|
||||||
|
|
||||||
|
|
||||||
|
def save_visualization(recording, output_file: str, quiet: bool = False):
    """Save visualization of recording.

    Args:
        recording: Recording object
        output_file: Path of the saved recording; the PNG gets the same base name
        quiet: Suppress progress messages
    """
    # Derive the image name from the recording name; .sigmf-data needs the
    # whole compound extension stripped, not just the last dot.
    if output_file.endswith(".sigmf-data"):
        stem = output_file.replace(".sigmf-data", "")
    else:
        stem = os.path.splitext(output_file)[0]
    png_path = stem + ".png"

    try:
        echo_progress(f"Generating visualization: {png_path}", quiet)
        view_simple_sig(recording, output_path=png_path, saveplot=True, fast_mode=False, labels_mode=True)
    except ImportError as e:
        # Plotting stack unavailable — warn, don't fail the capture.
        click.echo(click.style("Warning: ", fg="yellow") + f"Could not save visualization: {e}", err=True)
    except Exception as e:
        click.echo(click.style("Warning: ", fg="yellow") + f"Failed to save visualization: {e}", err=True)
|
||||||
|
|
||||||
|
|
||||||
|
def select_params(device, sample_rate, gain, bandwidth, quiet, verbose):
    """Fill in device, sample rate, gain, and bandwidth with per-device defaults.

    Any argument that is None is replaced by a safe default for the chosen
    device (matching signal-testbed conventions); explicit values pass through.

    Returns:
        tuple: (device, sample_rate, gain, bandwidth)
    """
    # Auto-select device if not specified
    if device is None:
        device = auto_select_device(quiet)

    if sample_rate is None:
        # Safe sample-rate defaults based on signal-testbed hardware limits.
        sample_rate = {
            "rtlsdr": 2.4e6,  # RTL-SDR max is 3.2 MHz, use 2.4 MHz safe default
            "thinkrf": 31.25e6,  # ThinkRF decimation 4 (from 125 MS/s)
            "pluto": 20e6,  # PlutoSDR up to 61 MHz, 20 MHz safe
            "hackrf": 20e6,  # HackRF up to 20 MHz
            "bladerf": 40e6,  # BladeRF up to 61 MHz, 40 MHz safe
            "usrp": 50e6,  # USRP up to 200 MHz, 50 MHz default from signal-testbed
        }.get(device, 20e6)

    if gain is None:
        # 32 dB baseline RX gain; ThinkRF uses attenuation (0 = none).
        gain = {
            "pluto": 32,
            "hackrf": 32,
            "bladerf": 32,
            "usrp": 32,
            "rtlsdr": 32,  # RTL-SDR will auto-select closest valid gain
            "thinkrf": 0,  # ThinkRF uses attenuation, 0 = no attenuation
        }.get(device, 32)
        echo_verbose(f"Using default RX gain: {gain} dB for {device}", verbose)

    if bandwidth is None:
        # Bandwidth tracks the sample rate; some devices manage it themselves.
        bandwidth = {
            "rtlsdr": None,  # RTL-SDR doesn't support bandwidth setting
            "thinkrf": None,  # ThinkRF manages bandwidth internally
            "pluto": sample_rate,
            "hackrf": sample_rate,
            "bladerf": sample_rate,
            "usrp": sample_rate,
        }.get(device)

    return device, sample_rate, gain, bandwidth
|
||||||
|
|
||||||
|
|
||||||
|
def determine_output_format(output, output_format, output_dir):
    """Resolve the output format, filename, and directory for a capture.

    When an explicit output path is given, its extension selects the format
    (unless --format was passed) and its directory overrides output_dir.
    SigMF filenames lose their extension because to_sigmf re-appends it.

    Returns:
        tuple: (output_format, output_filename_or_None, output_dir)
    """
    if not output:
        # No explicit path: filename is auto-generated later; default SigMF.
        return (output_format or "sigmf"), None, output_dir

    if output_format is None:
        ext = os.path.splitext(output)[1].lower().lstrip(".")
        ext_to_format = {
            "sigmf": "sigmf",
            "sigmf-data": "sigmf",
            "npy": "npy",
            "wav": "wav",
            "blue": "blue",
        }
        # Unknown extensions fall back to SigMF.
        output_format = ext_to_format.get(ext, "sigmf")

    # An explicit directory component overrides --output-dir.
    explicit_dir = os.path.dirname(output)
    if explicit_dir:
        output_dir = explicit_dir
    filename = os.path.basename(output)

    if output_format == "sigmf":
        # to_sigmf appends .sigmf-data itself, so strip any SigMF suffix.
        filename = filename.replace(".sigmf-data", "").replace(".sigmf", "")

    return output_format, filename, output_dir
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Main command
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
@click.command()
@click.option(
    "--device",
    "-d",
    type=click.Choice(["pluto", "hackrf", "bladerf", "usrp", "rtlsdr", "thinkrf"]),
    help="Device type",
)
@click.option("--ident", "-i", help="Device identifier (IP address or name=value, e.g., 192.168.2.1 or name=mypluto)")
@click.option(
    "--config", "-c", "config_file", type=click.Path(exists=True), help="Load parameters from YAML config file"
)
@click.option(
    "--sample-rate", "-s", type=float, default=None, help="Sample rate in Hz (e.g., 2e6) [default: device-specific]"
)
@click.option(
    "--center-frequency",
    "-f",
    type=str,
    default="2440M",
    show_default=True,
    help="Center frequency (e.g., 915e6, 2.4G)",
)
@click.option("--gain", "-g", type=float, help="RX gain in dB [default: device-specific]")
@click.option("--bandwidth", "-b", type=float, help="Bandwidth in Hz (if supported) [default: device-specific]")
@click.option("--num-samples", "-n", type=int, show_default=True, help="Number of samples to capture")
@click.option("--duration", "-t", type=float, help="Duration in seconds (alternative to --num-samples)")
@click.option("--output", "-o", help="Output filename (defaults to auto-generated with timestamp)")
@click.option("--output-dir", default="recordings", help="Output directory (default: recordings/)")
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Output format (default: sigmf)",
)
@click.option("--save-image", is_flag=True, help="Save visualization PNG alongside recording")
@click.option("--metadata", "-m", multiple=True, help="Add custom metadata (KEY=VALUE)")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress progress output")
def capture(
    device,
    ident,
    config_file,
    sample_rate,
    center_frequency,
    gain,
    bandwidth,
    num_samples,
    duration,
    output,
    output_dir,
    output_format,
    save_image,
    metadata,
    verbose,
    quiet,
):
    """Capture IQ samples from SDR device and save to file.

    \b
    Examples:
        utils capture -d hackrf -s 2e6 -f 2.44e6 -b 2e6
        utils capture -d pluto -s 1e6 -f 2e9 -b 2e6 -n 50

    """
    # BUG FIX: removed the stray `@click.argument("inputs", ...)` and
    # `@click.argument("output", ...)` decorators. Neither had a matching
    # function parameter (`inputs` would make every invocation fail with
    # TypeError) and the positional `output` collided with the --output
    # option; the docstring examples show capture takes no positional args.

    # Load config file if specified
    config = {}
    if config_file:
        config = load_yaml_config(config_file)
        echo_verbose(f"Loaded config from: {config_file}", verbose)

    # Command-line args override config file. BUG FIX: use explicit `is None`
    # checks instead of `or` so valid falsy CLI values survive — notably
    # `--gain 0`, the no-attenuation setting for ThinkRF.
    if device is None:
        device = config.get("device")
    if ident is None:
        ident = config.get("ident") or config.get("serial")  # Support legacy 'serial' in config
    if sample_rate is None:
        sample_rate = config.get("sample_rate")
    if center_frequency is None:
        center_frequency = config.get("center_frequency")
    if gain is None:
        gain = config.get("gain")
    if bandwidth is None:
        bandwidth = config.get("bandwidth")
    if num_samples is None:
        num_samples = config.get("num_samples")
    if duration is None:
        duration = config.get("duration")
    if output is None:
        output = config.get("output")
    if output_format is None:
        output_format = config.get("format")

    # Parse metadata (user-config defaults < config file < CLI -m args)
    metadata_dict = get_metadata_dict(config=config, metadata=metadata)

    # Fill in device-specific defaults for anything still unset
    device, sample_rate, gain, bandwidth = select_params(
        device=device, sample_rate=sample_rate, gain=gain, bandwidth=bandwidth, quiet=quiet, verbose=verbose
    )

    # Parse frequency (accepts 915e6, 2.4G, 2440M, ...)
    center_freq_hz = parse_frequency(center_frequency)

    # Calculate num_samples from duration if needed
    if duration is not None and num_samples is None:
        num_samples = int(duration * sample_rate)
        echo_verbose(f"Duration {duration}s = {num_samples} samples at {format_sample_rate(sample_rate)}", verbose)
    # NOTE(review): if neither --num-samples nor --duration is given,
    # num_samples stays None and is passed through to sdr.record() — confirm
    # the drivers define a default in that case.

    # Show capture parameters
    echo_progress(f"Capturing from {device.upper()}...", quiet)
    echo_progress(f"Sample rate: {format_sample_rate(sample_rate)}", quiet)
    echo_progress(f"Center frequency: {format_frequency(center_freq_hz)}", quiet)
    if gain is not None:
        echo_progress(f"Gain: {gain} dB", quiet)
    if bandwidth is not None:
        echo_progress(f"Bandwidth: {format_sample_rate(bandwidth)}", quiet)

    # Initialize device
    echo_verbose("Initializing device...", verbose)
    sdr = get_sdr_device(device, ident)

    try:
        # Initialize RX with parameters
        echo_verbose("Initializing RX...", verbose)
        sdr.init_rx(
            sample_rate=sample_rate, center_frequency=center_freq_hz, gain=gain, channel=0  # Default to channel 0
        )

        # Set bandwidth if supported (after init_rx)
        if bandwidth is not None and hasattr(sdr, "set_rx_bandwidth"):
            sdr.set_rx_bandwidth(bandwidth)

        # Capture
        echo_progress(f"Capturing {num_samples} samples...", quiet)
        recording = sdr.record(num_samples=num_samples)

        echo_progress(
            f"Captured {recording.data.shape[1] if len(recording.data.shape) > 1 else len(recording.data)} samples",
            quiet,
        )

        # Add custom metadata to recording
        if metadata_dict:
            for key, value in metadata_dict.items():
                recording.update_metadata(key, value)

        output_format, output_filename, output_dir = determine_output_format(
            output=output, output_format=output_format, output_dir=output_dir
        )
        echo_progress(f"Saving to {output_format.upper()} format...", quiet)

        # Save recording (filenames with timestamp auto-generated if output_filename is None).
        # All to_* functions handle directory creation internally.
        # Note: to_sigmf returns None, others return the saved path.
        if output_format == "sigmf":
            to_sigmf(recording, filename=output_filename, path=output_dir)
            # NOTE(review): when output_filename is None, to_sigmf generates
            # its own timestamped name; confirm generate_filename reproduces
            # the same base name so this reconstructed path matches.
            base_name = (
                os.path.splitext(output_filename)[0] if output_filename else generate_filename(recording=recording)
            )
            saved_path = os.path.join(output_dir, f"{base_name}.sigmf-data")
        elif output_format == "npy":
            saved_path = to_npy(recording, filename=output_filename, path=output_dir)
        elif output_format == "wav":
            saved_path = to_wav(recording, filename=output_filename, path=output_dir)
        elif output_format == "blue":
            saved_path = to_blue(recording, filename=output_filename, path=output_dir)
        else:
            # Defensive: click.Choice should prevent this branch.
            raise click.ClickException(f"Unsupported output format: {output_format}")

        echo_progress(f"Saved to: {saved_path}", quiet)

        # Save visualization if requested
        if save_image:
            save_visualization(recording, saved_path, quiet)

    finally:
        # Clean up device
        echo_verbose("Closing device...", verbose)
        sdr.close()

    echo_progress("Capture complete!", quiet)
|
||||||
494
ria_toolkit_oss_cli/ria_toolkit_oss/combine.py
Normal file
494
ria_toolkit_oss_cli/ria_toolkit_oss/combine.py
Normal file
|
|
@ -0,0 +1,494 @@
|
||||||
|
"""Combine command - Combine multiple recordings into a single file."""
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import click
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from utils.data import Recording
|
||||||
|
from utils.io import from_npy_legacy, load_recording
|
||||||
|
from utils_cli.utils.common import (
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_sample_count,
|
||||||
|
save_recording,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def load_recording_list(inputs, legacy, verbose, quiet):
    """Load every input path into a Recording, failing fast on the first error."""
    loaded = []
    for raw_path in inputs:
        path = Path(raw_path)

        try:
            # Legacy .npy files need the compatibility loader.
            if legacy:
                rec = from_npy_legacy(str(path))
            else:
                rec = load_recording(str(path))

            # Remember where this recording came from (used by combine metadata).
            if "original_file" not in rec._metadata:
                rec._metadata["original_file"] = path.name

            total = rec.data.shape[1]
            echo_verbose(f" Loading {path.name} ({format_sample_count(total)} samples)... Done", verbose)
            loaded.append(rec)

        except Exception as e:
            raise click.ClickException(f"Failed to load {path}: {e}")

    return loaded
|
||||||
|
|
||||||
|
|
||||||
|
def pad(recordings, max_len, verbose):
    """Zero-pad each shorter recording at the end so all reach max_len samples."""
    if verbose:
        click.echo(f"Aligning (zero-pad to {format_sample_count(max_len)} samples)...")
    out = []
    for idx, rec in enumerate(recordings):
        shortfall = max_len - rec.data.shape[1]
        if shortfall <= 0:
            # Already long enough — pass the array through untouched.
            out.append(rec.data)
            continue
        if verbose:
            click.echo(f" Recording {idx+1}: +{format_sample_count(shortfall)} zeros at end")
        out.append(np.pad(rec.data, ((0, 0), (0, shortfall)), mode="constant"))
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def pad_start(recordings, max_len, pad_start_sample, verbose):
    """Zero-pad shorter recordings with a fixed offset of zeros before the signal."""
    if verbose:
        click.echo(f"Aligning (pad-start at sample {format_sample_count(pad_start_sample)})...")
    out = []
    for idx, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            out.append(rec.data)
            continue
        before = pad_start_sample
        after = max_len - length - before
        # The requested offset plus the recording must still fit in max_len.
        if after < 0:
            raise click.ClickException(
                f"Invalid --pad-start-sample\n"
                f"Start sample {format_sample_count(pad_start_sample)} with recording length "
                f"{format_sample_count(length)} exceeds max length {format_sample_count(max_len)}"
            )
        if verbose:
            click.echo(
                f" Recording {idx+1}: +{format_sample_count(before)} zeros before, "
                f"+{format_sample_count(after)} zeros after"
            )
        out.append(np.pad(rec.data, ((0, 0), (before, after)), mode="constant"))
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def pad_center(recordings, max_len, verbose):
    """Center shorter recordings inside max_len by zero-padding both sides."""
    if verbose:
        click.echo(f"Aligning (pad-center in {format_sample_count(max_len)} samples)...")
    out = []
    for idx, rec in enumerate(recordings):
        shortfall = max_len - rec.data.shape[1]
        if shortfall <= 0:
            out.append(rec.data)
            continue
        # Odd shortfalls put the extra zero after the signal.
        before = shortfall // 2
        after = shortfall - before
        if verbose:
            click.echo(
                f" Recording {idx+1}: +{format_sample_count(before)} zeros before, "
                f"+{format_sample_count(after)} zeros after"
            )
        out.append(np.pad(rec.data, ((0, 0), (before, after)), mode="constant"))
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def pad_end(recordings, max_len, verbose):
    """Right-align recordings to max_len by zero-padding at the beginning."""
    if verbose:
        click.echo(f"Aligning (pad-end, align to {format_sample_count(max_len)} samples)...")
    out = []
    for idx, rec in enumerate(recordings):
        missing = max_len - rec.data.shape[1]
        if missing > 0:
            if verbose:
                click.echo(f" Recording {idx+1}: +{format_sample_count(missing)} zeros at beginning")
            out.append(np.pad(rec.data, ((0, 0), (missing, 0)), mode="constant"))
        else:
            out.append(rec.data)
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def repeat(recordings, max_len, verbose):
    """Tile shorter recordings end-to-end and truncate to exactly max_len."""
    if verbose:
        click.echo(f"Aligning (repeat pattern to match {format_sample_count(max_len)} samples)...")
    out = []
    for idx, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            out.append(rec.data)
            continue
        # Enough whole copies to cover max_len, then cut to size.
        reps = int(np.ceil(max_len / length))
        tiled = np.tile(rec.data, (1, reps))[:, :max_len]
        if verbose:
            click.echo(
                f" Recording {idx+1}: repeated {reps} times, "
                f"truncated to {format_sample_count(max_len)} samples"
            )
        out.append(tiled)
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def repeat_spaced(recordings, max_len, repeat_spacing, verbose):
    """Tile shorter recordings with zero-filled gaps of repeat_spacing samples."""
    if repeat_spacing <= 0:
        raise click.ClickException("Error: --align-mode repeat-spaced requires --repeat-spacing SAMPLES (must be > 0)")
    if verbose:
        click.echo(f"Aligning (repeat with {format_sample_count(repeat_spacing)} sample spacing)...")

    out = []
    for idx, rec in enumerate(recordings):
        pattern = rec.data
        pattern_len = pattern.shape[1]
        if pattern_len >= max_len:
            out.append(pattern)
            continue
        buf = np.zeros((pattern.shape[0], max_len), dtype=pattern.dtype)
        cursor = 0
        copies = 0
        # Stamp the pattern, skip the gap, repeat; the final copy may be cut short.
        while cursor < max_len:
            stop = min(cursor + pattern_len, max_len)
            buf[:, cursor:stop] = pattern[:, : stop - cursor]
            copies += 1
            cursor = stop + repeat_spacing
        if verbose:
            click.echo(
                f" Recording {idx+1}: {copies} repetitions "
                f"({format_sample_count(pattern_len)} samples + {format_sample_count(repeat_spacing)} spacing)"
            )
        out.append(buf)
    return out
|
||||||
|
|
||||||
|
|
||||||
|
def align_for_add(recordings, align_mode, pad_start_sample=0, repeat_spacing=0, verbose=False):
|
||||||
|
"""Align recordings for add mode based on alignment strategy.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
recordings: List of Recording objects
|
||||||
|
align_mode: Alignment mode string
|
||||||
|
pad_start_sample: Sample offset for pad-start mode
|
||||||
|
repeat_spacing: Spacing between repetitions for repeat-spaced mode
|
||||||
|
verbose: Verbose output
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of aligned numpy arrays
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
click.ClickException: If alignment fails or is invalid
|
||||||
|
"""
|
||||||
|
lengths = [rec.data.shape[1] for rec in recordings]
|
||||||
|
max_len = max(lengths)
|
||||||
|
min_len = min(lengths)
|
||||||
|
|
||||||
|
# All same length, no alignment needed
|
||||||
|
if len(set(lengths)) == 1:
|
||||||
|
if verbose:
|
||||||
|
click.echo(f" All recordings same length ({format_sample_count(max_len)} samples)")
|
||||||
|
return [rec.data for rec in recordings]
|
||||||
|
|
||||||
|
if align_mode == "error":
|
||||||
|
raise click.ClickException(
|
||||||
|
f"Recordings have different lengths: {[format_sample_count(len) for len in lengths]}\n"
|
||||||
|
f"Use --align-mode to specify alignment strategy:\n"
|
||||||
|
f" --align-mode truncate (use shortest: {format_sample_count(min_len)} samples)\n"
|
||||||
|
f" --align-mode pad (zero-pad to longest: {format_sample_count(max_len)} samples)\n"
|
||||||
|
f" --align-mode pad-center (center shorter in longer)\n"
|
||||||
|
f" --align-mode pad-end (align end of recordings)\n"
|
||||||
|
f" --align-mode repeat (repeat shorter to match longest)"
|
||||||
|
)
|
||||||
|
|
||||||
|
elif align_mode == "truncate":
|
||||||
|
if verbose:
|
||||||
|
click.echo(f"Aligning (truncate to {format_sample_count(min_len)} samples)...")
|
||||||
|
for i, rec in enumerate(recordings):
|
||||||
|
if rec.data.shape[1] > min_len:
|
||||||
|
click.echo(f" Recording {i+1}: truncated from {format_sample_count(rec.data.shape[1])} samples")
|
||||||
|
return [rec.data[:, :min_len] for rec in recordings]
|
||||||
|
|
||||||
|
elif align_mode == "pad":
|
||||||
|
return pad(recordings, max_len, verbose)
|
||||||
|
|
||||||
|
elif align_mode == "pad-start":
|
||||||
|
return pad_start(recordings, max_len, pad_start_sample, verbose)
|
||||||
|
|
||||||
|
elif align_mode == "pad-center":
|
||||||
|
return pad_center(recordings, max_len, verbose)
|
||||||
|
|
||||||
|
elif align_mode == "pad-end":
|
||||||
|
return pad_end(recordings, max_len, verbose)
|
||||||
|
|
||||||
|
elif align_mode == "repeat":
|
||||||
|
return repeat(recordings, max_len, verbose)
|
||||||
|
|
||||||
|
elif align_mode == "repeat-spaced":
|
||||||
|
return repeat_spaced(recordings, max_len, repeat_spacing, verbose)
|
||||||
|
|
||||||
|
else:
|
||||||
|
raise click.ClickException(f"Unknown alignment mode: {align_mode}")
|
||||||
|
|
||||||
|
|
||||||
|
def concat_recordings(recordings, verbose=False):
    """Concatenate recordings end-to-end.

    Data arrays are joined along the sample axis, annotations are shifted by
    each recording's cumulative offset, and metadata comes from the first
    recording with provenance fields added.

    Args:
        recordings: List of Recording objects
        verbose: Verbose output

    Returns:
        Recording: Combined recording
    """
    if verbose:
        click.echo("Concatenating...")

    merged = np.concatenate([rec.data for rec in recordings], axis=1)

    # Shift every annotation by the sample offset of the recording it came from.
    shifted = []
    offset = 0
    for rec in recordings:
        for ann in rec._annotations:
            moved = copy.deepcopy(ann)
            moved.sample_start += offset
            shifted.append(moved)
        offset += rec.data.shape[1]

    # Metadata from the first recording, plus provenance of the combine.
    meta = recordings[0]._metadata.copy()
    meta["combined_from"] = [rec._metadata.get("original_file", "unknown") for rec in recordings]
    meta["combine_mode"] = "concat"
    meta["num_inputs"] = len(recordings)
    meta["combine_timestamp"] = time.time()

    combined = Recording(data=merged, metadata=meta)
    combined._annotations = shifted

    if verbose:
        click.echo(f"Total: {format_sample_count(merged.shape[1])} samples")

    return combined
|
||||||
|
|
||||||
|
|
||||||
|
def add_recordings(recordings, align_mode="error", pad_start_sample=0, repeat_spacing=0, verbose=False):
    """Add/mix recordings sample-by-sample.

    Args:
        recordings: List of Recording objects
        align_mode: Alignment mode for different-length recordings
        pad_start_sample: Sample offset for pad-start mode
        repeat_spacing: Spacing for repeat-spaced mode
        verbose: Verbose output

    Returns:
        Recording: Combined recording
    """
    # Bring every input to a common length before summing.
    aligned = align_for_add(
        recordings,
        align_mode,
        pad_start_sample=pad_start_sample,
        repeat_spacing=repeat_spacing,
        verbose=verbose,
    )

    if verbose:
        click.echo("Adding signals...")

    mixed = sum(aligned)

    # Metadata comes from the first recording, with provenance appended.
    meta = recordings[0]._metadata.copy()
    meta["combined_from"] = [rec._metadata.get("original_file", "unknown") for rec in recordings]
    meta["combine_mode"] = "add"
    meta["align_mode"] = align_mode
    meta["num_inputs"] = len(recordings)
    meta["combine_timestamp"] = time.time()

    # Annotations from recordings beyond the first are intentionally dropped.
    if any(rec._annotations for rec in recordings[1:]):
        click.echo("Warning: Only first recording's annotations preserved (others discarded in add mode)", err=True)

    combined = Recording(data=mixed, metadata=meta)
    combined._annotations = recordings[0]._annotations.copy()

    if verbose:
        click.echo(f"Total: {format_sample_count(mixed.shape[1])} samples")

    return combined
@click.command()
@click.argument("inputs", nargs=-1, required=True, type=click.Path(exists=True))
@click.argument("output", nargs=1, required=True, type=click.Path())
@click.option(
    "--mode",
    type=click.Choice(["concat", "add"], case_sensitive=False),
    default="concat",
    help="Combination mode (default: concat)",
)
@click.option(
    "--align-mode",
    type=click.Choice(
        ["error", "truncate", "pad", "pad-start", "pad-center", "pad-end", "repeat", "repeat-spaced"],
        case_sensitive=False,
    ),
    default="error",
    help="Add mode alignment strategy (default: error)",
)
@click.option("--pad-start-sample", type=int, default=0, metavar="N", help="Sample offset for pad-start mode")
@click.option(
    "--repeat-spacing",
    type=int,
    default=0,
    metavar="SAMPLES",
    help="Spacing between repetitions for repeat-spaced mode",
)
@click.option("--legacy", is_flag=True, help="Load inputs as legacy NPY format")
@click.option("--normalize", is_flag=True, help="Normalize after combining")
@click.option(
    "--output-format",
    type=click.Choice(["sigmf", "npy", "wav", "blue"], case_sensitive=False),
    help="Force output format",
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing output file")
@click.option(
    "--metadata", multiple=True, metavar="KEY=VALUE", help="Add custom metadata (can be used multiple times)"
)
@click.option("--verbose", is_flag=True, help="Verbose output")
@click.option("--quiet", is_flag=True, help="Suppress output")
def combine(
    inputs,
    output,
    mode,
    align_mode,
    pad_start_sample,
    repeat_spacing,
    legacy,
    normalize,
    output_format,
    overwrite,
    metadata,
    verbose,
    quiet,
):
    """Combine multiple recordings into a single file.

    \b
    INPUTS    Input recording files (2 or more)
    OUTPUT    Output filename

    \b
    Modes:
      concat    Concatenate recordings end-to-end (default)
      add       Add signals sample-by-sample (mix/superimpose)

    \b
    Examples:
      # Concatenate recordings
      utils combine chunk1.npy chunk2.npy chunk3.npy full.npy
    \b
      # Add signal and noise
      utils combine signal.npy noise.npy noisy.npy --mode add\n
    \b
      # Add with center alignment
      utils combine long.npy short.npy output.npy --mode add --align-mode pad-center\n
    \b
      # Repeat pattern with spacing
      utils combine signal.npy pattern.npy output.npy --mode add --align-mode repeat-spaced --repeat-spacing 10000
    """
    # Validate inputs.
    if len(inputs) < 2:
        raise click.ClickException(
            "Error: At least 2 input files required\n" "Usage: utils combine INPUT1 INPUT2 [INPUT3 ...] OUTPUT"
        )
    # NOTE: a previous `len(inputs) == 1` special case was unreachable (the
    # check above already raises for fewer than 2 inputs) and has been removed.

    mode = mode.lower()
    align_mode = align_mode.lower()

    # Load recordings.
    align_str = ", " + align_mode + " alignment" if mode == "add" and align_mode != "error" else ""
    echo_progress(
        f"Combining {len(inputs)} recordings ({mode} mode{align_str})...",
        quiet,
    )
    recordings = load_recording_list(inputs, legacy, verbose, quiet)

    # Reject empty recordings up front.
    for i, rec in enumerate(recordings):
        if rec.data.shape[1] == 0:
            raise click.ClickException(
                f"Error: Input file '{inputs[i]}' has 0 samples\n" "Cannot combine empty recordings"
            )

    # Add mode requires matching sample rates and channel counts.
    if mode == "add":
        sample_rates = [rec._metadata.get("sample_rate") for rec in recordings]
        sample_rates = [sr for sr in sample_rates if sr is not None]
        if len(sample_rates) > 1 and len(set(sample_rates)) > 1:
            raise click.ClickException(
                f"Error: Recordings have different sample rates (add mode)\n"
                f"Sample rates: {sample_rates}\n"
                "All recordings must have matching sample rates for add mode"
            )

        channel_counts = [rec.data.shape[0] for rec in recordings]
        if len(set(channel_counts)) > 1:
            raise click.ClickException(
                f"Error: Recordings have different channel counts\n"
                f"Channels: {channel_counts}\n"
                "All recordings must have same number of channels"
            )

    # Combine recordings.
    if mode == "concat":
        combined = concat_recordings(recordings, verbose=verbose)
    elif mode == "add":
        combined = add_recordings(
            recordings,
            align_mode=align_mode,
            pad_start_sample=pad_start_sample,
            repeat_spacing=repeat_spacing,
            verbose=verbose,
        )
    else:
        raise click.ClickException(f"Unknown mode: {mode}")

    # Apply custom KEY=VALUE metadata.
    for meta_item in metadata:
        if "=" not in meta_item:
            raise click.ClickException(f"Invalid metadata format: {meta_item} (expected KEY=VALUE)")
        key, value = meta_item.split("=", 1)
        combined.update_metadata(key, value)

    # Normalize if requested.
    if normalize:
        echo_verbose("Normalizing...", verbose)
        combined = combined.normalize()
        combined.update_metadata("normalized", True)

    # Save output.
    try:
        save_recording(combined, output, output_format=output_format, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")


if __name__ == "__main__":
    combine()
26
ria_toolkit_oss_cli/ria_toolkit_oss/commands.py
Normal file
26
ria_toolkit_oss_cli/ria_toolkit_oss/commands.py
Normal file
|
|
@ -0,0 +1,26 @@
|
||||||
|
# flake8: noqa: F401
|
||||||
|
"""
|
||||||
|
This module contains all the CLI bindings for the utils package.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .annotate import annotate
|
||||||
|
from .capture import capture
|
||||||
|
from .combine import combine
|
||||||
|
from .convert import convert
|
||||||
|
|
||||||
|
# Import all command functions
|
||||||
|
from .discover import discover
|
||||||
|
from .generate import generate
|
||||||
|
from .init import init
|
||||||
|
from .split import split
|
||||||
|
from .transform import transform
|
||||||
|
from .transmit import transmit
|
||||||
|
from .view import view
|
||||||
|
|
||||||
|
# Aliases
|
||||||
|
synth = generate
|
||||||
|
|
||||||
|
# All commands will be automatically registered by cli.py
|
||||||
|
# Commands must be click.Command instances
|
||||||
|
|
||||||
|
|
||||||
408
ria_toolkit_oss_cli/ria_toolkit_oss/common.py
Normal file
408
ria_toolkit_oss_cli/ria_toolkit_oss/common.py
Normal file
|
|
@ -0,0 +1,408 @@
|
||||||
|
"""Common utilities for CLI commands."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
import click
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from ria_toolkit_oss.datatypes.recording import Recording
|
||||||
|
from src.ria_toolkit_oss.io.recording import to_blue, to_npy, to_sigmf, to_wav
|
||||||
|
|
||||||
|
|
||||||
|
def load_yaml_config(config_file: str) -> Dict[str, Any]:
    """Load YAML configuration file.

    Args:
        config_file: Path to YAML file

    Returns:
        Dictionary of configuration parameters

    Raises:
        click.ClickException: If file cannot be loaded
    """
    try:
        with open(config_file, "r") as handle:
            parsed = yaml.safe_load(handle)
    except FileNotFoundError:
        raise click.ClickException(f"Config file not found: {config_file}")
    except yaml.YAMLError as e:
        raise click.ClickException(f"Error parsing YAML config: {e}")
    # Empty files parse to None; callers always receive a dict.
    return parsed or {}
def detect_file_format(filepath):
    """Detect file format from extension.

    Args:
        filepath: Path to file

    Returns:
        str: Format name ('sigmf', 'npy', 'wav', 'blue')

    Raises:
        click.ClickException: If format cannot be determined
    """
    fp = Path(filepath)
    suffix = fp.suffix.lower()

    # Any of the three SigMF-related extensions maps to the same format.
    if suffix in (".sigmf", ".sigmf-data", ".sigmf-meta"):
        return "sigmf"

    known = {".npy": "npy", ".wav": "wav", ".blue": "blue"}
    fmt = known.get(suffix)
    if fmt is None:
        raise click.ClickException(
            f"Unknown format for '{fp}'\n" f"Supported extensions: .sigmf, .npy, .wav, .blue"
        )
    return fmt
def parse_metadata_args(metadata_args: List[str]) -> Dict[str, Any]:
    """Parse metadata KEY=VALUE arguments.

    Args:
        metadata_args: List of "KEY=VALUE" strings

    Returns:
        Dictionary of parsed metadata

    Raises:
        click.ClickException: If metadata format is invalid
    """
    parsed: Dict[str, Any] = {}
    # These keys always stay strings, even when they look numeric.
    string_keys = ("experiment", "campaign", "project")

    for item in metadata_args:
        if "=" not in item:
            raise click.ClickException(f"Invalid metadata format: '{item}'. Expected KEY=VALUE")

        key, _, raw = item.partition("=")

        if key in string_keys:
            parsed[key] = raw
            continue

        # Attempt numeric conversion; fall back to the raw string.
        try:
            if "." in raw or "e" in raw.lower():
                parsed[key] = float(raw)
            else:
                parsed[key] = int(raw)
        except ValueError:
            parsed[key] = raw

    return parsed
def parse_frequency(freq_str: str) -> float:
    """Parse frequency string with suffixes (k, M, G).

    Args:
        freq_str: Frequency string (e.g., "915e6", "2.4G", "433M")

    Returns:
        Frequency in Hz

    Raises:
        click.ClickException: If frequency format is invalid
    """
    try:
        # Plain numbers and scientific notation parse directly.
        digits_only = freq_str.replace(".", "").replace("-", "")
        if "e" in freq_str.lower() or digits_only.isdigit():
            return float(freq_str)

        # Suffix notation: trailing k/K, M, or G scales the value.
        for suffix, scale in (("k", 1e3), ("K", 1e3), ("M", 1e6), ("G", 1e9)):
            if freq_str.endswith(suffix):
                return float(freq_str[:-1]) * scale

        # No recognized suffix; last attempt as a plain float.
        return float(freq_str)

    except ValueError:
        raise click.ClickException(
            f"Invalid frequency format: '{freq_str}'. " "Use formats like: 915e6, 2.4G, 433M, 100k"
        )
def format_frequency(freq_hz: float) -> str:
    """Format frequency in human-readable form.

    Args:
        freq_hz: Frequency in Hz

    Returns:
        Formatted string (e.g., "915.00 MHz")
    """
    # Pick the largest unit whose threshold the value reaches.
    for threshold, unit in ((1e9, "GHz"), (1e6, "MHz"), (1e3, "kHz")):
        if freq_hz >= threshold:
            return f"{freq_hz / threshold:.2f} {unit}"
    return f"{freq_hz:.2f} Hz"
def format_sample_rate(rate_hz: float) -> str:
    """Format sample rate in human-readable form.

    Args:
        rate_hz: Sample rate in Hz

    Returns:
        Formatted string (e.g., "2.00 MS/s")
    """
    # Docstring example corrected: the function emits "MS/s", not "MSPS".
    if rate_hz >= 1e6:
        return f"{rate_hz/1e6:.2f} MS/s"
    elif rate_hz >= 1e3:
        return f"{rate_hz/1e3:.2f} kS/s"
    else:
        return f"{rate_hz:.2f} S/s"
def format_sample_count(count):
    """Return *count* rendered with thousands separators (e.g. "1,234,567")."""
    return format(count, ",")
def get_output_path(filename: Optional[str], path: Optional[str], default_dir: str = "recordings") -> str:
    """Generate full output path.

    Args:
        filename: Output filename (can be None for auto-generated)
        path: Output directory path
        default_dir: Default directory if path not specified

    Returns:
        Full path for output file
    """
    if path is None:
        path = default_dir

    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(path, exist_ok=True)

    # Without a filename, callers get just the directory.
    return os.path.join(path, filename) if filename else path
def save_recording(recording: Recording, output_path=None, output_format=None, overwrite=False, verbose=False):
    """Save recording to file with format-specific handling.

    Args:
        recording: Recording object to save
        output_path: Output file path; when None a name is derived from the
            recording's signal_type, rec_id and timestamp.
        output_format: Optional format override ('sigmf', 'npy', 'wav', 'blue');
            when None it is detected from the output path's extension.
        overwrite: Whether to overwrite existing files
        verbose: Verbose output

    Raises:
        click.ClickException: If save fails
    """
    if output_path is None:
        # Auto-generate filename from recording attributes.
        timestamp = recording.timestamp
        rec_id = recording.rec_id[:8]
        signal_type = recording.metadata.get("signal_type", "signal")
        output_path = f"{signal_type}_{rec_id}_{int(timestamp)}"

    output_path = Path(output_path)

    # Detect format from the extension if not specified explicitly.
    if output_format is None:
        output_format = detect_file_format(output_path)

    # SigMF writers take a base name and append their own extensions.
    # A path without a SigMF extension keeps its full name (any dot in it is
    # treated as part of the name, not an extension); otherwise the extension
    # is stripped via .stem.
    if output_format == "sigmf" and output_path.suffix not in [".sigmf-data", ".sigmf-meta", ".sigmf"]:
        base_name = output_path.name
    else:
        base_name = output_path.stem

    output_dir = output_path.parent

    # Create the output directory if needed.
    if output_dir and not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
        echo_verbose(f"Created directory: {output_dir}", verbose)

    # Refuse to clobber existing output unless --overwrite was given.
    check_for_overwriting(overwrite, output_format, output_path)

    # Dispatch to the format-specific writer.
    try:
        if output_format == "sigmf":
            to_sigmf(recording, filename=base_name, path=str(output_dir), overwrite=overwrite)
        elif output_format == "npy":
            to_npy(recording, filename=str(output_path), overwrite=overwrite)
        elif output_format == "wav":
            to_wav(recording, filename=str(output_path), overwrite=overwrite)
        elif output_format == "blue":
            to_blue(recording, filename=str(output_path), overwrite=overwrite)
        else:
            # NOTE(review): this ClickException is caught by the handler below
            # and re-wrapped as "Failed to save output: ..." — confirm intended.
            raise click.ClickException(f"Unsupported output format: {output_format}")
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")
def echo_verbose(message: str, verbose: bool):
    """Emit *message* only when verbose mode is enabled.

    Args:
        message: Message to print
        verbose: Whether verbose mode is enabled
    """
    if not verbose:
        return
    click.echo(message)
def echo_progress(message: str, quiet: bool = False):
    """Print a progress message to stderr unless quiet mode is enabled.

    Args:
        message: Progress message
        quiet: Whether quiet mode is enabled
    """
    if quiet:
        return
    click.echo(message, err=True)
def confirm_dangerous_operation(message: str, skip_confirm: bool = False) -> bool:
    """Ask for confirmation of a potentially dangerous operation.

    Args:
        message: Warning message
        skip_confirm: Skip confirmation (for automation)

    Returns:
        True if user confirmed, False otherwise
    """
    if skip_confirm:
        return True

    # Warning goes to stderr so it is visible even when stdout is redirected.
    prefix = click.style("WARNING: ", fg="yellow", bold=True)
    click.echo(prefix + message, err=True)
    return click.confirm("Continue?", default=False)
def check_for_overwriting(overwrite, output_format, output_path):
    """Raise click.ClickException if the output exists and overwrite is False."""
    if overwrite:
        return

    output_path = Path(output_path)

    if output_format == "sigmf":
        # SigMF output is a file pair; either one existing blocks the write.
        data_file = output_path.with_suffix(".sigmf-data")
        meta_file = output_path.with_suffix(".sigmf-meta")
        if data_file.exists() or meta_file.exists():
            raise click.ClickException(
                f"Output files exist: {data_file.name}, {meta_file.name}\n" f"Use --overwrite to replace"
            )
    elif output_path.exists():
        raise click.ClickException(f"Output file '{output_path}' already exists\n" f"Use --overwrite to replace")
def parse_ident(ident: Optional[str]) -> tuple[Optional[str], Optional[str]]:
    """
    Parse device identifier into IP address or name.

    Args:
        ident: Device identifier (IP address or name=value)

    Returns:
        Tuple of (ip_address, name) where one will be None
    """
    if not ident:
        return None, None

    key, sep, value = ident.partition("=")
    if sep and key.lower() == "name":
        return None, value

    # Plain strings and unrecognized key=value pairs are treated as addresses.
    return ident, None
def get_sdr_device(device_type: str, ident: Optional[str] = None, tx=False):
    """
    Get TX-capable SDR device instance.

    Args:
        device_type: Type of device (pluto, hackrf, bladerf, usrp)
        ident: Device identifier (IP address or name=value)
        tx: When True, reject device types that cannot transmit.

    Returns:
        SDR device instance

    Raises:
        click.ClickException: If device cannot be initialized or doesn't support TX
    """
    TX_CAPABLE_DEVICES = ["pluto", "hackrf", "bladerf", "usrp"]
    if tx and device_type not in TX_CAPABLE_DEVICES:
        raise click.ClickException(
            f"Device '{device_type}' does not support transmission (RX only)\n"
            f"TX-capable devices: {', '.join(TX_CAPABLE_DEVICES)}"
        )

    # Split the identifier into (ip_addr, name); at most one is non-None.
    ip_addr, name = parse_ident(ident)

    # Drivers are imported lazily so a missing optional dependency only fails
    # when that particular device type is requested.
    try:
        if device_type == "pluto":
            from utils.sdr.pluto import Pluto

            if ip_addr:
                return Pluto(identifier=ip_addr)
            else:
                return Pluto()

        elif device_type == "hackrf":
            from utils.sdr.hackrf import HackRF

            # HackRF driver takes no identifier here; `ident` is ignored.
            return HackRF()

        elif device_type == "bladerf":
            from utils.sdr.blade import Blade

            return Blade()

        elif device_type == "usrp":
            from utils.sdr.usrp import USRP

            # UHD-style identifier strings: "addr=<ip>" or "name=<name>".
            if ip_addr:
                return USRP(identifier=f"addr={ip_addr}")
            elif name:
                return USRP(identifier=f"name={name}")
            else:
                return USRP()

        elif device_type == "rtlsdr":
            from utils.sdr.rtlsdr import RTLSDR

            return RTLSDR()

        elif device_type == "thinkrf":
            from utils.sdr.thinkrf import ThinkRF

            if ip_addr:
                return ThinkRF(identifier=ip_addr)
            else:
                return ThinkRF()

        else:
            # NOTE(review): this ClickException is caught by the broad handler
            # below and re-wrapped as "Failed to initialize ..." — confirm intended.
            raise click.ClickException(f"Unknown device type: {device_type}")

    except ImportError as e:
        raise click.ClickException(
            f"Failed to import {device_type} driver: {e}\n" f"Ensure required dependencies are installed"
        )
    except Exception as e:
        raise click.ClickException(f"Failed to initialize {device_type}: {e}")
206
ria_toolkit_oss_cli/ria_toolkit_oss/config.py
Normal file
206
ria_toolkit_oss_cli/ria_toolkit_oss/config.py
Normal file
|
|
@ -0,0 +1,206 @@
|
||||||
|
"""Configuration file utilities for Utils CLI.
|
||||||
|
|
||||||
|
This module provides utilities for managing the user configuration file.
|
||||||
|
The core integration (actually using these configs) is TODO for the core team.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
def get_config_path(config_path: Optional[str] = None) -> Path:
    """Get path to user config file.

    Args:
        config_path: Optional custom config path

    Returns:
        Path to config file
    """
    if config_path:
        return Path(config_path)

    # Prefer the XDG base-directory location when configured (Linux standard).
    xdg_base = os.environ.get("XDG_CONFIG_HOME")
    if xdg_base:
        return Path(xdg_base) / "utils" / "config.yaml"

    # Default location: ~/.utils/config.yaml
    return Path.home() / ".utils" / "config.yaml"
def load_user_config(config_path: Optional[str] = None) -> Optional[dict]:
    """Load user configuration from file.

    Args:
        config_path: Optional custom config path

    Returns:
        Config dict if file exists, None otherwise
    """
    path = get_config_path(config_path)

    if not path.exists():
        return None

    try:
        with open(path, "r") as handle:
            loaded = yaml.safe_load(handle)
    except yaml.YAMLError as e:
        raise ValueError(f"Invalid YAML in config file: {e}")
    except Exception as e:
        raise IOError(f"Error reading config file: {e}")

    # Empty files parse to None; callers always get a dict.
    return loaded if loaded else {}
def save_user_config(config: dict, config_path: Optional[str] = None) -> Path:
    """Save user configuration to file.

    Args:
        config: Configuration dictionary
        config_path: Optional custom config path

    Returns:
        Path where config was saved
    """
    path = get_config_path(config_path)

    # Ensure the parent directory exists.
    path.parent.mkdir(parents=True, exist_ok=True)

    # Write a short banner followed by the YAML payload.
    with open(path, "w") as handle:
        handle.write("# Utils SDR CLI Configuration\n")
        handle.write("# Auto-generated by 'utils init'\n")
        handle.write("# Edit with 'utils init' or modify this file directly\n\n")
        yaml.dump(config, handle, default_flow_style=False, sort_keys=False)

    # Restrict permissions to the owning user; best effort (may fail on Windows).
    try:
        os.chmod(path, 0o600)
    except Exception:
        pass

    return path
def validate_config(config: dict) -> list[str]:
    """Validate configuration and return list of warnings.

    Args:
        config: Configuration dictionary

    Returns:
        List of warning messages (empty if no issues)
    """
    issues: list[str] = []

    # An empty or missing author is worth flagging.
    if not config.get("author"):
        issues.append("Author field is empty - consider setting your name")

    # Warn on uncommon license identifiers (Proprietary is accepted as valid).
    if "sigmf" in config and "license" in config["sigmf"]:
        license_id = config["sigmf"]["license"]
        recognized = {
            "Proprietary",
            "CC0-1.0",
            "CC-BY-4.0",
            "CC-BY-SA-4.0",
            "MIT",
            "Apache-2.0",
            "GPL-3.0",
            "BSD-3-Clause",
        }
        if license_id not in recognized:
            issues.append(
                f"License '{license_id}' is not a common identifier. "
                f"Consider: Proprietary, CC-BY-4.0, MIT, or other SPDX identifier"
            )

    return issues
def format_config_display(config: dict) -> str:
    """Format configuration for display.

    Args:
        config: Configuration dictionary

    Returns:
        Formatted string
    """
    lines = []

    # Top-level metadata fields, in fixed display order.
    for key, label in (
        ("author", "Author"),
        ("organization", "Organization"),
        ("project", "Project"),
        ("location", "Location"),
        ("testbed", "Testbed"),
    ):
        if config.get(key):
            lines.append(f"{label}: {config[key]}")

    # SigMF-specific metadata, shown only when the section exists.
    if "sigmf" in config:
        sigmf = config["sigmf"]
        for key, label in (("license", "License"), ("hw", "Hardware"), ("dataset", "Dataset")):
            if sigmf.get(key):
                lines.append(f"{label}: {sigmf[key]}")

    return "\n".join(lines) if lines else "(empty configuration)"
|
# TODO for core team: Integration functions
|
||||||
|
# These will be implemented when wiring config into core utils logic
|
||||||
|
|
||||||
|
|
||||||
|
def merge_config(user_config: dict, cli_args: dict) -> dict:
    """Merge configs with precedence: cli_args > user_config > defaults.

    TODO: Implement this when integrating with capture/convert/transmit commands.

    Args:
        user_config: User configuration from file
        cli_args: Arguments from CLI

    Returns:
        Merged configuration
    """
    # Placeholder: CLI values override file values, but None means "not given".
    result = dict(user_config)
    for key, value in cli_args.items():
        if value is not None:
            result[key] = value
    return result
|
def apply_config_to_metadata(metadata: dict, config: dict) -> dict:
    """Apply configuration defaults to recording metadata.

    TODO: Implement this in capture.py, convert.py when core team wires it in.

    Args:
        metadata: Existing metadata dict
        config: User configuration

    Returns:
        Updated metadata dict
    """
    # Placeholder: config only supplies defaults; existing metadata wins.
    merged = dict(metadata)
    for field in ("author", "organization", "project", "location", "testbed"):
        if field in config and field not in merged:
            merged[field] = config[field]
    return merged
303
ria_toolkit_oss_cli/ria_toolkit_oss/convert.py
Normal file
303
ria_toolkit_oss_cli/ria_toolkit_oss/convert.py
Normal file
|
|
@ -0,0 +1,303 @@
|
||||||
|
"""Convert command - Convert recordings between file formats."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils.io.recording import (
|
||||||
|
from_npy,
|
||||||
|
load_recording,
|
||||||
|
to_blue,
|
||||||
|
to_npy,
|
||||||
|
to_sigmf,
|
||||||
|
to_wav,
|
||||||
|
)
|
||||||
|
from utils_cli.utils.common import (
|
||||||
|
check_for_overwriting,
|
||||||
|
detect_file_format,
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_sample_count,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .config import load_user_config
|
||||||
|
|
||||||
|
|
||||||
|
def parse_metadata_override(metadata_str):
    """Parse KEY=VALUE metadata string.

    Args:
        metadata_str: String in format "key=value"

    Returns:
        tuple: (key, value) where value is converted to appropriate type

    Raises:
        click.BadParameter: If the string contains no "=" separator.
    """
    if "=" not in metadata_str:
        raise click.BadParameter(f"Metadata must be in KEY=VALUE format, got: {metadata_str}")

    key, _, raw = metadata_str.partition("=")

    # Prefer a numeric interpretation: int when there is no decimal point,
    # float otherwise; anything unparseable stays a string.
    try:
        return (key, float(raw)) if "." in raw else (key, int(raw))
    except ValueError:
        return (key, raw)
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
@click.argument("input", type=click.Path(exists=True))
@click.argument("output", type=click.Path(), required=False)
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Output format (required if OUTPUT not specified, otherwise auto-detected from extension)",
)
@click.option("--output-dir", type=click.Path(), help="Output directory (default: current directory)")
@click.option("--legacy", is_flag=True, help="Load input as legacy NPY format")
@click.option("--wav-sample-rate", type=float, default=48000, show_default=True, help="Target WAV sample rate in Hz")
@click.option(
    "--wav-bits", type=click.Choice(["16", "32"]), default="32", show_default=True, help="WAV bits per sample"
)
@click.option(
    "--blue-format",
    type=click.Choice(["CI", "CF", "CD"]),
    default="CI",
    show_default=True,
    help="MIDAS Blue format: CI (int16), CF (float32), CD (float64)",
)
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--metadata", multiple=True, help="Add/override metadata as KEY=VALUE (can be repeated)")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def convert(  # noqa: C901
    input,
    output,
    output_format,
    output_dir,
    legacy,
    wav_sample_rate,
    wav_bits,
    blue_format,
    overwrite,
    metadata,
    verbose,
    quiet,
):
    """Convert recordings between file formats.

    Automatically detects input format and converts to desired output format.
    Supports SigMF, NumPy (.npy), WAV IQ stereo, and MIDAS Blue formats.

    If OUTPUT is not specified, the input filename is used with a new extension
    based on the --format option.

    \b
    Examples:
        # SigMF to NumPy (explicit output)
        utils convert recording.sigmf-data output.npy
    \b
        # Auto-generate output filename
        utils convert recording.npy --format sigmf
    \b
        # Convert to specific directory
        utils convert long_path/recording.npy --format sigmf --output-dir converted
    \b
        # NumPy to WAV with decimation
        utils convert high_rate.npy audio.wav --wav-sample-rate 48000
    \b
        # Legacy NPY to SigMF
        utils convert old.npy --format sigmf --legacy --overwrite
    \b
        # Add metadata during conversion
        utils convert raw.npy --format sigmf --metadata "location=lab" --metadata "antenna=dipole"
    """

    # Generate output filename if not provided
    if output is None:
        if output_format is None:
            raise click.ClickException(
                "Either OUTPUT or --format must be specified\n"
                "Examples:\n"
                " utils convert input.npy output.sigmf\n"
                " utils convert input.npy --format sigmf"
            )

        # Get input filename without extension
        input_path = Path(input)
        input_stem = input_path.stem

        # For SigMF input, remove .sigmf-data or .sigmf-meta suffix
        # NOTE(review): Path.stem already strips the last dotted suffix, so these
        # endswith checks only fire for doubly-suffixed names — confirm intent.
        if input_stem.endswith(".sigmf-data") or input_stem.endswith(".sigmf-meta"):
            input_stem = input_stem[:-11]  # Remove '.sigmf-data'/'.sigmf-meta'
        elif input_stem.endswith(".sigmf"):
            input_stem = input_stem[:-6]  # Remove '.sigmf'

        # Determine output directory
        if output_dir:
            out_dir = Path(output_dir)
        else:
            out_dir = Path(".")  # Current directory

        # Generate output filename with new extension
        extension_map = {"sigmf": ".sigmf", "npy": ".npy", "wav": ".wav", "blue": ".blue"}
        output = str(out_dir / f"{input_stem}{extension_map[output_format]}")

        echo_verbose(f"Auto-generated output: {output}", verbose)

    # Detect input and output formats
    input_format = detect_file_format(input)
    if output_format is None:
        # OUTPUT was given without --format: infer from its extension.
        output_format = detect_file_format(output)

    # Check for overwriting
    output_path = Path(output)
    check_for_overwriting(overwrite, output_format, output_path)

    echo_progress(f"Converting: {os.path.basename(input)} → {os.path.basename(output)}", quiet)
    echo_progress(f"Input format: {input_format.upper()}", quiet)
    echo_progress(f"Output format: {output_format.upper()}", quiet)

    # Load input recording
    echo_verbose("Reading input...", verbose)
    try:
        if legacy:
            echo_verbose("Using legacy NPY loader", verbose)
            recording = from_npy(input, legacy=True)
        else:
            recording = load_recording(input)
    except Exception as e:
        raise click.ClickException(f"Failed to load input file: {e}")

    # Get sample count: a 2-D array is (channels, samples), 1-D is a single channel.
    if hasattr(recording.data, "shape"):
        if len(recording.data.shape) == 2:
            num_samples = recording.data.shape[1]
            num_channels = recording.data.shape[0]
        else:
            num_samples = len(recording.data)
            num_channels = 1
    else:
        num_samples = len(recording.data)
        num_channels = 1

    echo_progress(f"Samples: {format_sample_count(num_samples)}", quiet)
    if num_channels > 1:
        echo_progress(f"Channels: {num_channels}", quiet)
    echo_verbose("Input loaded successfully", verbose)

    # Load user config and apply default metadata
    user_config = load_user_config()
    if user_config:
        echo_verbose("Applying user config metadata...", verbose)
        # Add standard metadata fields from config (if not already present)
        # NOTE(review): writes to the private recording._metadata attribute —
        # confirm Recording exposes no public setter for this.
        for key in ["author", "organization", "project", "location", "testbed"]:
            if key in user_config and key not in recording.metadata:
                recording._metadata[key] = user_config[key]
                echo_verbose(f" {key} = {user_config[key]} (from config)", verbose)

        # Add SigMF fields from config (if not already present)
        if "sigmf" in user_config:
            sigmf = user_config["sigmf"]
            for key in ["license", "hw", "dataset"]:
                if key in sigmf and key not in recording.metadata:
                    recording._metadata[key] = sigmf[key]
                    echo_verbose(f" {key} = {sigmf[key]} (from config)", verbose)

    # Apply metadata overrides from CLI (highest priority)
    if metadata:
        echo_verbose("Applying metadata overrides from CLI...", verbose)
        for meta_str in metadata:
            key, value = parse_metadata_override(meta_str)
            recording._metadata[key] = value
            echo_verbose(f" {key} = {value} (CLI override)", verbose)

    # Convert to output format
    echo_verbose(f"Writing {output_format.upper()} output...", verbose)

    # Split output into directory and filename for functions that need it
    output_dir = output_path.parent
    output_filename = output_path.name

    # If output_dir is empty (relative path with no dir), use current directory
    if str(output_dir) == ".":
        output_dir = None
    elif not output_dir.exists():
        # Create output directory if it doesn't exist
        output_dir.mkdir(parents=True, exist_ok=True)

    try:
        # Note: All to_* functions use (recording, filename, path) signature
        # We split the output path into directory and filename components
        if output_format == "sigmf":
            to_sigmf(recording, filename=output_filename, path=output_dir, overwrite=overwrite)
            echo_progress(
                (
                    f"Conversion complete: {output_path.with_suffix('.sigmf-data').name}, "
                    f"{output_path.with_suffix('.sigmf-meta').name}"
                ),
                quiet,
            )

        elif output_format == "npy":
            to_npy(recording, filename=output_filename, path=output_dir, overwrite=overwrite)
            echo_progress(f"Conversion complete: {output}", quiet)

        elif output_format == "wav":
            # Check for multichannel
            if num_channels > 1:
                raise click.ClickException(
                    f"WAV export not supported for multichannel recordings\n"
                    f"Input has {num_channels} channels, WAV export requires single channel"
                )

            # Show decimation info if applicable (to_wav performs the actual decimation)
            original_sample_rate = recording.metadata.get("sample_rate", wav_sample_rate)
            if original_sample_rate > wav_sample_rate:
                decimation_factor = int(original_sample_rate / wav_sample_rate)
                new_sample_count = num_samples // decimation_factor
                echo_progress(f"Original sample rate: {original_sample_rate / 1e6:.1f} MHz", quiet)
                echo_progress(f"Target sample rate: {wav_sample_rate / 1e3:.1f} kHz", quiet)
                echo_progress(f"Decimation factor: {decimation_factor}", quiet)
                echo_progress(f"Output samples: {format_sample_count(new_sample_count)}", quiet)
                echo_verbose("Decimating...", verbose)

            to_wav(
                recording,
                filename=output_filename,
                path=output_dir,
                target_sample_rate=wav_sample_rate,
                bits_per_sample=int(wav_bits),
                overwrite=overwrite,
            )
            echo_progress(f"Conversion complete: {output}", quiet)

        elif output_format == "blue":
            # Convert blue format string to format expected by to_blue
            # NOTE(review): this map is currently an identity mapping — kept for
            # the day the CLI choice and to_blue codes diverge.
            format_map = {"CI": "CI", "CF": "CF", "CD": "CD"}  # Complex int16 # Complex float32 # Complex float64
            blue_data_format = format_map[blue_format]
            echo_verbose(f"Using MIDAS Blue format: {blue_format} ({blue_data_format})", verbose)

            to_blue(
                recording, filename=output_filename, path=output_dir, data_format=blue_data_format, overwrite=overwrite
            )
            echo_progress(f"Conversion complete: {output}", quiet)

    except Exception as e:
        raise click.ClickException(f"Failed to write output file: {e}")

    # Show metadata preservation info in verbose mode
    if verbose and recording.metadata:
        echo_verbose("\nMetadata preserved:", verbose)
        for key, value in recording.metadata.items():
            echo_verbose(f" {key}: {value}", verbose)
|
||||||
|
|
||||||
|
|
||||||
|
# Allow running this module directly as a script.
if __name__ == "__main__":
    convert()
|
||||||
518
ria_toolkit_oss_cli/ria_toolkit_oss/discover.py
Normal file
518
ria_toolkit_oss_cli/ria_toolkit_oss/discover.py
Normal file
|
|
@ -0,0 +1,518 @@
|
||||||
|
"""Device discovery utilities for SDR devices."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
from typing import Any, Dict, List, Tuple
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
# Track loaded and failed drivers.
# Module-level state: reset and repopulated by load_sdr_drivers(); read by
# print_all_devices() and the discover command.
_loaded_drivers = []
_failed_drivers = []
_failure_reasons = {}
|
||||||
|
|
||||||
|
|
||||||
|
def load_sdr_drivers(verbose: bool = False) -> Tuple[List[str], List[str], Dict[str, str]]:
    """
    Load available SDR drivers.

    Imports each known driver module and records which succeeded / failed,
    updating the module-level tracking lists as a side effect.

    Args:
        verbose: Show detailed error messages

    Returns:
        Tuple of (loaded_drivers, failed_drivers, failure_reasons)
    """
    global _loaded_drivers, _failed_drivers, _failure_reasons  # noqa: F824

    # Reset module-level state so repeated calls don't accumulate stale entries.
    _loaded_drivers.clear()
    _failed_drivers.clear()
    _failure_reasons.clear()

    # Try to import each SDR driver
    drivers = {
        "pluto": "utils.sdr.pluto",
        "hackrf": "utils.sdr.hackrf",
        "bladerf": "utils.sdr.bladerf",
        "usrp": "utils.sdr.usrp",
        "rtlsdr": "utils.sdr.rtlsdr",
        "thinkrf": "utils.sdr.thinkrf",
    }

    for driver_name, module_path in drivers.items():
        try:
            # Attempt to import the driver module
            if not verbose:
                # Suppress output for quiet loading
                import warnings

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    __import__(module_path)
            else:
                __import__(module_path)

            _loaded_drivers.append(driver_name)

        except ImportError as e:
            _failed_drivers.append(driver_name)
            error_msg = str(e)
            if "No module named" in error_msg:
                # Extract the quoted module name from the ImportError text.
                module_name = error_msg.split("'")[1] if "'" in error_msg else "unknown"
                _failure_reasons[driver_name] = f"ModuleNotFoundError: {module_name}"
            else:
                _failure_reasons[driver_name] = f"ImportError: {error_msg}"
        except Exception as e:
            # Module was found but raised during import (e.g. missing shared library).
            _failed_drivers.append(driver_name)
            _failure_reasons[driver_name] = f"{type(e).__name__}: {str(e)}"

    return _loaded_drivers, _failed_drivers, _failure_reasons
|
||||||
|
|
||||||
|
|
||||||
|
def find_hackrf_devices() -> List[Dict[str, Any]]:
    """Find HackRF devices using hackrf_info command."""
    found: List[Dict[str, Any]] = []
    try:
        output = subprocess.check_output(["hackrf_info"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5)

        # Each "Index:" line opens a new device section; a section is only
        # reported once a serial number has been seen for it.
        current: Dict[str, Any] = {"type": "HackRF One"}
        for line in output.split("\n"):
            if "Index: " in line:
                if "serial" in current:
                    found.append(current)
                current = {"type": "HackRF One", "device_index": line.split(":")[1].strip()}
            if "Serial number:" in line:
                current["serial"] = line.split(":")[1].strip()
            elif "Board ID Number:" in line:
                current["board_id"] = line.split(":")[1].strip()
            elif "Firmware Version:" in line:
                current["firmware"] = line.split(":")[1].strip()

        # Flush the final section.
        if "serial" in current:
            found.append(current)

    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # hackrf_info missing, failing, or hanging: report no devices.
        pass

    return found
|
||||||
|
|
||||||
|
|
||||||
|
def find_bladerf_devices() -> List[Dict[str, Any]]:
    """Find BladeRF devices using bladeRF-cli command."""
    found: List[Dict[str, Any]] = []
    try:
        listing = subprocess.check_output(
            ["bladeRF-cli", "-p"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5
        )

        # Fold every "key: value" line of the probe output into one record.
        record: Dict[str, Any] = {"type": "BladeRF"}
        for raw_line in listing.strip().split("\n"):
            stripped = raw_line.strip()
            if ":" in stripped:
                field, _, field_value = stripped.partition(":")
                record[field.strip()] = field_value.strip()

        # NOTE(review): record always holds at least "type", so one entry is
        # reported whenever bladeRF-cli exits successfully — confirm it exits
        # non-zero when no boards are attached.
        if record:
            found.append(record)

    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
        pass

    return found
|
||||||
|
|
||||||
|
|
||||||
|
def find_uhd_devices() -> List[Dict[str, Any]]:
    """Find USRP/UHD devices using uhd_find_devices command."""
    found: List[Dict[str, Any]] = []
    try:
        listing = subprocess.check_output(
            ["uhd_find_devices"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=10
        )

        # uhd_find_devices prints one "-- UHD Device N" header per device,
        # each followed by indented "key: value" lines.
        if "-- UHD Device" in listing:
            for section in listing.split("-- UHD Device")[1:]:
                record: Dict[str, Any] = {}
                for raw_line in section.strip().split("\n"):
                    stripped = raw_line.strip()
                    if ":" in stripped and not stripped.startswith("--"):
                        field, _, field_value = stripped.partition(":")
                        record[field.strip()] = field_value.strip()
                if record:
                    found.append(record)

    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
        pass

    return found
|
||||||
|
|
||||||
|
|
||||||
|
def find_rtlsdr_devices() -> List[Dict[str, Any]]:
    """Find RTL-SDR devices using rtl_test command.

    Returns:
        List of device dicts with "type", "device_index" and "serial" keys.
        Empty when rtl_test is unavailable or no devices are attached.

    Raises:
        ValueError: If the device count reported by rtl_test does not match
            the number of per-device lines parsed.
    """
    devices: List[Dict[str, Any]] = []
    # Explicit sentinel instead of the fragile `"count" in locals()` probe:
    # None means rtl_test never reported a "Found N device(s)" line.
    count = None
    try:
        result = subprocess.check_output(
            ["rtl_test", "-t"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5
        )

        # Parse the device count line and each "index: name, SN: serial" line.
        for line in result.split("\n"):
            if "Found" in line and "device" in line:
                match = re.search(r"Found (\d+) device", line)
                if match:
                    count = int(match.group(1))
            elif "SN: " in line:
                device_match = re.search(r"(\d+): .*SN: (\w+)", line)
                if device_match:
                    devices.append(
                        {"type": "RTL-SDR", "device_index": device_match.group(1), "serial": device_match.group(2)}
                    )

        # Sanity check: the stated count must match what was parsed.
        if count is not None and len(devices) != count:
            raise ValueError("Number of stated devices does not match number of found devices")

    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        pass

    return devices
|
||||||
|
|
||||||
|
|
||||||
|
def ping_ip(ip: str, timeout: int = 1) -> bool:
    """
    Ping an IP address to check if device is reachable.

    Args:
        ip: IP address to ping
        timeout: Timeout in seconds

    Returns:
        True if ping successful, False otherwise
    """
    try:
        subprocess.check_output(
            ["ping", "-c", "1", "-W", str(timeout), ip], stderr=subprocess.STDOUT, timeout=timeout + 1
        )
        return True
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # FileNotFoundError added: a host without a `ping` binary should read
        # as "unreachable" instead of crashing — consistent with the other
        # discovery helpers in this module, which all tolerate missing tools.
        return False
|
||||||
|
|
||||||
|
|
||||||
|
def find_pluto_network() -> List[Dict[str, Any]]:
    """Find PlutoSDR devices on the network by pinging common addresses."""
    # Default hostnames/addresses advertised by PlutoSDR's USB-ethernet gadget.
    candidates = ("pluto.local", "192.168.2.1", "192.168.3.1")
    return [
        {
            "type": "PlutoSDR",
            "uri": f"ip:{addr}",
            "description": "Network PlutoSDR",
        }
        for addr in candidates
        if ping_ip(addr, timeout=1)
    ]
|
||||||
|
|
||||||
|
|
||||||
|
def find_pluto_devices() -> List[Dict[str, Any]]:
    """Find PlutoSDR devices using pyadi-iio.

    Falls back to network ping discovery when the iio bindings are missing,
    and finally to lsusb-based USB detection when nothing else matched.
    """
    devices = []
    try:
        import iio

        contexts = iio.scan_contexts()

        for uri, description in contexts.items():
            if "PlutoSDR" in description or "pluto" in uri.lower():
                try:
                    ctx = iio.Context(uri)
                    device_info = {
                        "type": "PlutoSDR",
                        "uri": uri,
                        "serial": ctx.attrs.get("hw_serial", "unknown"),
                        "firmware": ctx.attrs.get("fw_version", "unknown"),
                        "ip_addr": ctx.attrs.get("ip,ip-addr", "unknown"),
                        "model": ctx.attrs.get("hw_model", "unknown"),
                        "description": description,
                    }

                    # Deduplicate: the same Pluto can appear under several URIs
                    # (e.g. both usb: and ip:), so match on serial number.
                    unique = True
                    for existing_device in devices:
                        if existing_device["serial"] == device_info["serial"]:
                            unique = False

                    if unique:
                        devices.append(device_info)
                    # NOTE(review): _destroy() is a private iio.Context API —
                    # confirm the bindings offer no public release method.
                    ctx._destroy()
                except Exception:
                    # Context could not be opened/read; skip this URI.
                    pass

    except ImportError:
        # Fallback to network ping discovery if pyadi-iio not available
        devices.extend(find_pluto_network())

    # Last resort: look for a Pluto attached over USB via lsusb.
    if not devices:
        usb_devices = get_usb_devices()
        pluto_usb = [d for d in usb_devices if "PlutoSDR" in d.get("sdr_type", "")]
        for pluto in pluto_usb:
            pluto["type"] = "PlutoSDR"
            pluto["uri"] = "usb:" + pluto["bus"]
            devices.append(pluto)

    return devices
|
||||||
|
|
||||||
|
|
||||||
|
def find_thinkrf_devices() -> List[Dict[str, Any]]:
    """Find ThinkRF devices (placeholder for future implementation)."""
    # ThinkRF discovery goes over the network through a proprietary SDK.
    # TODO: Implement when pyrf is available and working
    return []
|
||||||
|
|
||||||
|
|
||||||
|
def get_usb_devices() -> List[Dict[str, Any]]:
    """Get USB devices using lsusb for SDR identification."""
    # Known USB VID:PID pairs for supported SDR hardware.
    known_ids = {
        "2cf0:5250": "BladeRF 2.0",
        "2cf0:5246": "BladeRF 1.0",
        "0bda:2838": "RTL-SDR",
        "0456:b673": "PlutoSDR (ADALM-PLUTO)",
        "2500:0020": "USRP B210",
        "2500:0021": "USRP B200",
        "1d50:604b": "HackRF One",
    }

    found: List[Dict[str, Any]] = []
    try:
        listing = subprocess.check_output(["lsusb"], universal_newlines=True, timeout=5)

        for line in listing.strip().split("\n"):
            for vid_pid, sdr_name in known_ids.items():
                if vid_pid not in line:
                    continue
                # Standard lsusb line: "Bus BBB Device DDD: ID vvvv:pppp Description"
                match = re.match(r"Bus (\d+) Device (\d+): ID ([0-9a-f:]+) (.+)", line)
                if match:
                    bus, device, usb_id, description = match.groups()
                    found.append(
                        {
                            "bus": bus,
                            "device": device,
                            "usb_id": usb_id,
                            "description": description,
                            "sdr_type": sdr_name,
                        }
                    )

    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        pass

    return found
|
||||||
|
|
||||||
|
|
||||||
|
def discover_all_devices(verbose: bool = False, json_output: bool = False) -> Dict[str, Any]:
    """
    Discover all SDR devices with signal-testbed style output.

    Loads the drivers (updating the module-level tracking lists as a side
    effect), then probes each supported device family.

    Args:
        verbose: Show detailed error messages
        json_output: When False, the per-driver device lists are also included
            in the result (consumed by print_all_devices)

    Returns:
        A dictionary containing information
    """
    load_sdr_drivers(verbose=verbose)

    # Probe every supported device family independently.
    uhd_devices = find_uhd_devices()
    pluto_devices = find_pluto_devices()
    rtlsdr_devices = find_rtlsdr_devices()
    bladerf_devices = find_bladerf_devices()
    hackrf_devices = find_hackrf_devices()

    # Collect all device info
    all_devices = []
    all_devices.extend(uhd_devices)
    all_devices.extend(pluto_devices)
    all_devices.extend(rtlsdr_devices)
    all_devices.extend(bladerf_devices)
    all_devices.extend(hackrf_devices)

    output = {
        "loaded_drivers": _loaded_drivers,
        "failed_drivers": _failed_drivers,
        "devices": all_devices,
        "total_devices": len(all_devices),
    }

    if verbose:
        output["failure_reasons"] = _failure_reasons

    # Human-readable mode needs the per-family breakdown; JSON output keeps
    # only the flat "devices" list.
    if not json_output:
        output["uhd_devices"] = uhd_devices
        output["pluto_devices"] = pluto_devices
        output["rtlsdr_devices"] = rtlsdr_devices
        output["bladerf_devices"] = bladerf_devices
        output["hackrf_devices"] = hackrf_devices

    return output
|
||||||
|
|
||||||
|
|
||||||
|
def print_all_devices(device_dict: dict, verbose: bool = False) -> int:  # noqa: C901
    """
    Print all SDR devices with signal-testbed style output.

    Reads the per-family device lists produced by discover_all_devices with
    json_output=False, and the module-level _loaded_drivers list.

    Args:
        device_dict: Dictionary containing all device info
        verbose: Show detailed error messages

    Returns:
        Total number of devices found
    """
    total_devices = 0

    # USRP/UHD Discovery - Try command-line tool even if driver failed to load
    uhd_devices = device_dict["uhd_devices"]
    if uhd_devices:
        click.echo(f"\n📡 USRP/UHD devices ({len(uhd_devices)}):")
        for device in uhd_devices:
            name = device.get("name", "Unknown")
            product = device.get("product", "Unknown")
            serial = device.get("serial", "Unknown")
            click.echo(f" ✅ {name} ({product}) - Serial: {serial}")
        total_devices += len(uhd_devices)
    else:
        if verbose:
            click.echo("\n📡 USRP/UHD devices: None found")

    # PlutoSDR Discovery - Try both pyadi-iio and USB detection
    pluto_devices = device_dict["pluto_devices"]
    pluto_count = len(pluto_devices)

    if pluto_count > 0:
        click.echo(f"\n📱 PlutoSDR devices ({pluto_count}):")
        for device in pluto_devices:
            # Determine if network or USB based on URI
            uri = device["uri"]
            if uri.startswith("ip:"):
                click.echo(f" ✅ Network: {uri.replace('ip:', '')}")
            elif uri.startswith("usb:"):
                click.echo(f" ✅ USB: {device['description']} (Bus {uri.replace('usb:', '').split('.')[0]})")
            else:
                click.echo(f" ✅ {uri}")

        total_devices += pluto_count
    else:
        if verbose:
            click.echo("\n📱 PlutoSDR devices: None found")

    # RTL-SDR Discovery
    # NOTE(review): unlike the UHD/Pluto sections above, the remaining
    # sections are skipped entirely when their driver failed to load —
    # confirm this asymmetry is intentional.
    if "rtlsdr" in _loaded_drivers:
        rtl_devices = device_dict["rtlsdr_devices"]
        if rtl_devices:
            click.echo(f"\n📻 RTL-SDR devices ({len(rtl_devices)}):")
            for device in rtl_devices:
                idx = device.get("device_index", 0)
                click.echo(f" ✅ Device {idx}: {device.get('type', 'RTL-SDR')}")
            total_devices += len(rtl_devices)
        else:
            if verbose:
                click.echo("\n📻 RTL-SDR devices: None found")

    # BladeRF Discovery
    if "bladerf" in _loaded_drivers:
        bladerf_devices = device_dict["bladerf_devices"]
        if bladerf_devices:
            click.echo(f"\n⚡ BladeRF devices ({len(bladerf_devices)}):")
            for device in bladerf_devices:
                desc = device.get("Description", "BladeRF")
                serial = device.get("Serial", "Unknown")
                click.echo(f" ✅ {desc} - Serial: {serial}")
            total_devices += len(bladerf_devices)
        else:
            if verbose:
                click.echo("\n⚡ BladeRF devices: None found")

    # HackRF Discovery
    if "hackrf" in _loaded_drivers:
        hackrf_devices = device_dict["hackrf_devices"]
        if hackrf_devices:
            click.echo(f"\n🔧 HackRF devices ({len(hackrf_devices)}):")
            for device in hackrf_devices:
                serial = device.get("serial", "Unknown")
                board = device.get("board_id", "")
                firmware = device.get("firmware", "")
                info = f"Serial: {serial}"
                if board:
                    info += f" - Board ID: {board}"
                if firmware:
                    info += f" - FW: {firmware}"
                click.echo(f" ✅ {device.get('type', 'HackRF')} - {info}")
            total_devices += len(hackrf_devices)
        else:
            if verbose:
                click.echo("\n🔧 HackRF devices: None found")

    # ThinkRF Discovery
    if "thinkrf" in _loaded_drivers:
        if verbose:
            click.echo("\n🌐 ThinkRF devices: Discovery not yet implemented")

    return total_devices
|
||||||
|
|
||||||
|
|
||||||
|
@click.command(help="Discover connected SDR devices")
@click.option("--verbose", "-v", is_flag=True, help="Show detailed information and errors")
@click.option("--json-output", is_flag=True, help="Output in JSON format")
def discover(verbose, json_output):
    """Discover connected SDR devices with driver loading."""

    # Also populates the module-level _loaded_drivers/_failed_drivers state
    # that the human-readable output below reads.
    device_dict = discover_all_devices(verbose=verbose, json_output=json_output)

    # JSON mode: Load drivers and return structured data
    if json_output:
        click.echo(json.dumps(device_dict, indent=2))
        return

    # Human-readable mode: Signal-testbed style

    # Print loaded drivers
    if _loaded_drivers:
        click.echo(f"\n✅ Loaded drivers ({len(_loaded_drivers)}):")
        for driver in _loaded_drivers:
            click.echo(f" {driver}")
    else:
        click.echo("\n❌ No drivers loaded successfully")

    # Print failed drivers
    if _failed_drivers:
        click.echo(f"\n❌ Failed drivers ({len(_failed_drivers)}):")
        for driver in _failed_drivers:
            if verbose and driver in _failure_reasons:
                click.echo(f" {driver}: {_failure_reasons[driver]}")
            else:
                click.echo(f" {driver}")

    if not verbose and _failed_drivers:
        click.echo("\nRun with --verbose to see failure reasons")

    # Device discovery
    click.echo("\n" + "=" * 40)
    click.echo("Attached Devices")
    click.echo("=" * 40)

    total_devices = print_all_devices(device_dict=device_dict, verbose=verbose)

    # Summary
    click.echo("\n" + "=" * 40)
    click.echo("Discovery Summary")
    click.echo("=" * 40)
    click.echo(f"Loaded drivers: {len(_loaded_drivers)}")
    click.echo(f"Failed drivers: {len(_failed_drivers)}")
    click.echo(f"Detected devices: {total_devices}")

    if total_devices == 0:
        click.echo("\n💡 No devices detected - ensure they are connected and powered on")
|
||||||
1586
ria_toolkit_oss_cli/ria_toolkit_oss/generate.py
Normal file
1586
ria_toolkit_oss_cli/ria_toolkit_oss/generate.py
Normal file
File diff suppressed because it is too large
Load Diff
318
ria_toolkit_oss_cli/ria_toolkit_oss/init.py
Normal file
318
ria_toolkit_oss_cli/ria_toolkit_oss/init.py
Normal file
|
|
@ -0,0 +1,318 @@
|
||||||
|
"""Init command - Initialize user configuration."""
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from .config import (
|
||||||
|
format_config_display,
|
||||||
|
get_config_path,
|
||||||
|
load_user_config,
|
||||||
|
save_user_config,
|
||||||
|
validate_config,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def prompt_with_default(text: str, default: str = "") -> "str | None":
    """Prompt the user, offering *default* when one is available.

    Args:
        text: Prompt text shown to the user.
        default: Default value offered when the user presses Enter.

    Returns:
        The user's input (or the accepted default), or ``None`` when the
        response is empty — i.e. the optional field was skipped.
    """
    if default:
        result = click.prompt(text, default=default, show_default=True)
    else:
        # No default available: let the user skip the field by pressing Enter.
        result = click.prompt(text, default="", show_default=False)
    # Normalize an empty answer (skipped field) to None so callers can use
    # simple truthiness checks. The original had a redundant early-return for
    # the empty case; one normalization at the end covers both branches.
    return result or None
|
||||||
|
|
||||||
|
|
||||||
|
def init_show(config_file_path, config_path):
    """Print the current configuration, or guidance when none exists."""
    if not config_file_path.exists():
        click.echo(f"No configuration file found at: {config_file_path}")
        click.echo("\nRun 'utils init' to create a configuration.")
        return

    try:
        loaded = load_user_config(config_path)
        display_lines = (
            f"Current Configuration ({config_file_path}):",
            "=" * 60,
            "",
            format_config_display(loaded),
            "",
            "To update: utils init",
            "To reset: utils init --reset",
        )
        for line in display_lines:
            click.echo(line)
    except Exception as err:
        # A corrupt/unreadable file is reported rather than raised.
        click.echo(f"Error reading configuration: {err}", err=True)
        click.echo("\nRun 'utils init --reset' to recreate the configuration.")
|
||||||
|
|
||||||
|
|
||||||
|
def init_reset(config_file_path, config_path, yes):
    """Delete the configuration file after previewing it and confirming."""
    if not config_file_path.exists():
        click.echo(f"No configuration file found at: {config_file_path}")
        return

    # Preview what is about to be removed; tolerate a corrupt file.
    try:
        current = load_user_config(config_path)
        click.echo(f"This will delete your configuration file at: {config_file_path}")
        click.echo()
        click.echo("Current configuration:")
        click.echo("\n".join(f"  {entry}" for entry in format_config_display(current).split("\n")))
        click.echo()
    except Exception:
        click.echo(f"Configuration file exists but may be corrupted: {config_file_path}")
        click.echo()

    # Ask for confirmation unless --yes was given.
    if not yes and not click.confirm("Are you sure you want to reset?", default=False):
        click.echo("Reset cancelled.")
        return

    # Remove the file; report (don't raise) on failure.
    try:
        config_file_path.unlink()
        click.echo("\n✓ Configuration deleted.")
        click.echo("\nRun 'utils init' to create a new configuration.")
    except Exception as err:
        click.echo(f"Error deleting configuration: {err}", err=True)
|
||||||
|
|
||||||
|
|
||||||
|
def build_config(author, organization, project, location, testbed):
    """Assemble the top-level configuration dict, omitting empty fields.

    Args:
        author: Author name.
        organization: Organization/institution name.
        project: Project identifier.
        location: Physical location.
        testbed: Testbed identifier.

    Returns:
        dict: Only the fields that were provided (truthy values).
    """
    candidates = {
        "author": author,
        "organization": organization,
        "project": project,
        "location": location,
        "testbed": testbed,
    }
    return {key: value for key, value in candidates.items() if value}
|
||||||
|
|
||||||
|
|
||||||
|
def build_sigmf(license_id, hardware, dataset):
    """Assemble the SigMF metadata section, omitting empty fields.

    Args:
        license_id: SPDX-style license identifier.
        hardware: Hardware description.
        dataset: Dataset identifier.

    Returns:
        dict: SigMF fields ("license", "hw", "dataset") for provided values.
    """
    candidates = (("license", license_id), ("hw", hardware), ("dataset", dataset))
    return {key: value for key, value in candidates if value}
|
||||||
|
|
||||||
|
|
||||||
|
def save_config(config, config_path, use_interactive, warnings):
    """Persist *config* to disk and print follow-up hints.

    Args:
        config: Configuration dict to save.
        config_path: Optional alternate config file location.
        use_interactive: Whether the caller was running interactively.
        warnings: Validation warnings to surface in non-interactive mode.

    Returns:
        int: 0 on success, 1 when saving failed.
    """
    try:
        destination = save_user_config(config, config_path)
        click.echo(f"\n✓ Configuration saved to: {destination}")

        if use_interactive:
            click.echo()
            click.echo("You can view your config anytime with: utils init --show")
            click.echo("You can update values by running: utils init")
        elif warnings:
            # Interactive mode already showed warnings before saving; surface
            # them here for the non-interactive path.
            click.echo()
            click.echo("Warnings:")
            for item in warnings:
                click.echo(f"  ⚠️ {item}")

        # Reminder that config wiring into other commands is still pending.
        for note in (
            "",
            "NOTE: Automatic config integration is not yet implemented.",
            "Config values must currently be applied manually with --metadata flags.",
            "(Core team TODO: wire config into capture/convert/transmit commands)",
        ):
            click.echo(note)
        return 0

    except Exception as err:
        click.echo(f"\nError saving configuration: {err}", err=True)
        return 1
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
@click.option("--author", help="Author name (your name)")
@click.option("--organization", help="Organization/institution name")
@click.option("--project", help="Project name or identifier")
@click.option("--location", help="Physical location (lab name, site, etc.)")
@click.option("--testbed", help="Testbed identifier")
@click.option("--license", "license_id", help="Data license (SPDX identifier, default: Proprietary)")
@click.option("--hw", "hardware", help="Hardware description (e.g., PlutoSDR, USRP B210)")
@click.option("--dataset", help="Dataset identifier")
@click.option("--show", is_flag=True, help="Display current configuration and exit")
@click.option("--reset", is_flag=True, help="Delete existing config")
@click.option("--config-path", type=click.Path(), help="Use alternate config file location")
@click.option("--interactive/--no-interactive", default=None, help="Force interactive mode on/off")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompts")
def init(
    author,
    organization,
    project,
    location,
    testbed,
    license_id,
    hardware,
    dataset,
    show,
    reset,
    config_path,
    interactive,
    yes,
):
    """Initialize user configuration.

    Creates a configuration file at ~/.utils/config.yaml with default metadata
    values that will be used across CLI commands.

    Examples:

    \b
    # Interactive setup
    utils init

    \b
    # Non-interactive setup
    utils init --author "Jane Doe" --project "RF_Analysis" --location "Lab_A"

    \b
    # Show current configuration
    utils init --show

    \b
    # Reset configuration
    utils init --reset
    """

    config_file_path = get_config_path(config_path)

    # Handle --show flag: display and exit without touching the file.
    if show:
        init_show(config_file_path, config_path)
        return

    # Handle --reset flag: delete (with confirmation) and exit.
    if reset:
        init_reset(config_file_path, config_path, yes)
        return

    # Determine if we should use interactive mode
    # Interactive if: no CLI args provided OR --interactive flag OR config file doesn't exist
    # NOTE(review): license_id is not included below, so passing only --license
    # still triggers interactive mode — confirm this is intentional.
    has_cli_args = any([author, organization, project, location, testbed, hardware, dataset])

    if interactive is None:
        # Auto-detect: interactive if no args provided
        use_interactive = not has_cli_args
    else:
        use_interactive = interactive

    # Load existing config if it exists (used to pre-fill interactive prompts).
    existing_config = None
    if config_file_path.exists():
        try:
            existing_config = load_user_config(config_path)
        except Exception as e:
            # A corrupt file is not fatal — warn and start fresh.
            click.echo(f"Warning: Could not load existing config: {e}", err=True)
            click.echo("Creating new configuration...\n")

    # Interactive mode: prompt for each field, using existing values as defaults.
    if use_interactive:
        click.echo()
        click.echo("Welcome to Utils SDR CLI Configuration!")
        click.echo("=" * 60)
        click.echo()
        click.echo(f"This will create a configuration file at: {config_file_path}")
        click.echo()
        click.echo("These values will be automatically added to recordings and conversions.")
        click.echo("You can always change these later by running 'utils init' again.")
        click.echo()
        click.echo("Press Enter to skip optional fields.")
        click.echo()

        # Required information
        click.echo("Required Information:")
        click.echo("-" * 20)

        # Use existing values as defaults
        author_default = existing_config.get("author", "") if existing_config else ""
        org_default = existing_config.get("organization", "") if existing_config else ""
        proj_default = existing_config.get("project", "") if existing_config else ""
        loc_default = existing_config.get("location", "") if existing_config else ""
        test_default = existing_config.get("testbed", "") if existing_config else ""

        # Author uses click.prompt directly so the default is only shown when
        # one actually exists; the other fields may be skipped (-> None).
        author = click.prompt(
            "Author name (your name)", default=author_default or "", show_default=bool(author_default)
        )
        organization = prompt_with_default("Organization (optional)", org_default)
        project = prompt_with_default("Project name (optional)", proj_default)
        location = prompt_with_default("Location (optional)", loc_default)
        testbed = prompt_with_default("Testbed name (optional)", test_default)

        # SigMF metadata
        click.echo()
        click.echo("SigMF Metadata (optional):")
        click.echo("-" * 27)

        sigmf_defaults = existing_config.get("sigmf", {}) if existing_config else {}
        license_default = sigmf_defaults.get("license", "Proprietary")
        hw_default = sigmf_defaults.get("hw", "")
        dataset_default = sigmf_defaults.get("dataset", "")

        license_id = click.prompt(
            "License (e.g., Proprietary, CC-BY-4.0, MIT)", default=license_default, show_default=True
        )
        hardware = prompt_with_default("Hardware description (e.g., PlutoSDR)", hw_default)
        dataset = prompt_with_default("Dataset name (optional)", dataset_default)

    # Build configuration (empty/None fields are dropped by the helpers).
    config = build_config(author, organization, project, location, testbed)

    # SigMF section — only attached when at least one SigMF field was given.
    sigmf = build_sigmf(license_id, hardware, dataset)
    if sigmf:
        config["sigmf"] = sigmf

    # Validate configuration (returns a list of warning strings).
    warnings = validate_config(config)

    # Show configuration summary (interactive mode only; non-interactive
    # warnings are printed by save_config instead).
    if use_interactive:
        click.echo()
        click.echo("Configuration Summary:")
        click.echo("-" * 22)
        click.echo(format_config_display(config))
        click.echo()

        # Show warnings
        if warnings:
            click.echo("Warnings:")
            for warning in warnings:
                click.echo(f"  ⚠️ {warning}")
            click.echo()

        # Confirm save
        if not yes:
            if not click.confirm("Save this configuration?", default=True):
                click.echo("Configuration not saved.")
                return

    # Save configuration (returns 0/1 exit status from save_config).
    return save_config(config, config_path, use_interactive, warnings)


if __name__ == "__main__":
    init()
|
||||||
421
ria_toolkit_oss_cli/ria_toolkit_oss/split.py
Normal file
421
ria_toolkit_oss_cli/ria_toolkit_oss/split.py
Normal file
|
|
@ -0,0 +1,421 @@
|
||||||
|
"""Split command - Split, trim, and extract portions of recordings."""
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import click
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from utils.io import from_npy_legacy, load_recording
|
||||||
|
from utils_cli.utils.common import (
|
||||||
|
detect_file_format,
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_sample_count,
|
||||||
|
save_recording,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_output_extension(format_name):
    """Return the file extension for a supported output format.

    Args:
        format_name: One of "sigmf", "npy", "wav", "blue".

    Raises:
        KeyError: If *format_name* is not a recognized format.
    """
    return {"sigmf": ".sigmf", "npy": ".npy", "wav": ".wav", "blue": ".blue"}[format_name]
|
||||||
|
|
||||||
|
|
||||||
|
def validate_operation(split_at, split_every, split_duration, trim, extract_annotations):
    """Ensure that exactly one split/trim/extract operation was requested.

    Raises:
        click.ClickException: When zero or more than one operation is selected.
    """
    flags = (
        split_at is not None,
        split_every is not None,
        split_duration is not None,
        trim,
        extract_annotations,
    )
    selected = sum(1 for flag in flags if flag)

    if selected == 0:
        raise click.ClickException(
            "No operation specified. Use one of:\n"
            " --split-at SAMPLE\n"
            " --split-every N\n"
            " --split-duration SECONDS\n"
            " --trim (with --start and --length or --end)\n"
            " --extract-annotations"
        )
    if selected > 1:
        raise click.ClickException(
            "Multiple operations specified. Use only one of:\n"
            " --split-at, --split-every, --split-duration, --trim, --extract-annotations"
        )
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("--split-at", type=int, metavar="SAMPLE", help="Split into two files at sample index")
@click.option("--split-every", type=int, metavar="N", help="Split into chunks of N samples")
@click.option(
    "--split-duration",
    type=float,
    metavar="SECONDS",
    help="Split into chunks of specified duration (requires sample_rate in metadata)",
)
@click.option("--trim", is_flag=True, help="Extract portion of recording (use with --start and --length or --end)")
@click.option(
    "--start", "start_sample", type=int, default=0, show_default=True, help="Start sample for trim operation"
)
@click.option("--length", "num_samples", type=int, help="Number of samples for trim operation")
@click.option("--end", "end_sample", type=int, help="End sample for trim operation (alternative to --length)")
@click.option("--extract-annotations", is_flag=True, help="Extract each annotated region to separate file")
@click.option("--annotation-label", type=str, help="Only extract annotations with this label")
@click.option("--annotation-index", type=int, help="Extract specific annotation by index")
@click.option("--output-dir", type=click.Path(), help="Output directory (default: current directory)")
@click.option("--output-prefix", type=str, help="Prefix for output filenames")
@click.option(
    "--output-format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Force output format (default: same as input)",
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing output files")
@click.option("--legacy", is_flag=True, help="Load input as legacy NPY format")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def split(  # noqa: C901
    input,
    split_at,
    split_every,
    split_duration,
    trim,
    start_sample,
    num_samples,
    end_sample,
    extract_annotations,
    annotation_label,
    annotation_index,
    output_dir,
    output_prefix,
    output_format,
    overwrite,
    legacy,
    verbose,
    quiet,
):
    """Split, trim, and extract portions of recordings.

    Split recordings into multiple files, extract portions, or extract annotated regions.

    \b
    Examples:
    # Split at specific sample
    utils split recording.sigmf --split-at 500000 --output-dir split_output

    \b
    # Split into equal chunks
    utils split capture.npy --split-every 100000 --output-dir chunks

    \b
    # Split by duration (requires sample_rate in metadata)
    utils split recording.sigmf --split-duration 1.0 --output-dir segments

    \b
    # Trim recording
    utils split signal.npy --trim --start 1000 --length 5000 --output-dir trimmed

    \b
    # Trim with end index
    utils split signal.npy --trim --start 1000 --end 6000 --output-dir trimmed

    \b
    # Extract all annotated regions
    utils split annotated.sigmf --extract-annotations --output-dir annotations

    \b
    # Extract specific annotation label
    utils split annotated.sigmf --extract-annotations --annotation-label "payload"

    \b
    # Extract specific annotation by index
    utils split annotated.sigmf --extract-annotations --annotation-index 1
    """

    # Validate operation selection — exactly one of the five operations.
    validate_operation(split_at, split_every, split_duration, trim, extract_annotations)

    # Validate trim parameters: --length and --end are mutually exclusive,
    # and one of them is required.
    if trim:
        if num_samples is None and end_sample is None:
            raise click.ClickException("Trim operation requires either --length or --end")
        if num_samples is not None and end_sample is not None:
            raise click.ClickException("Cannot specify both --length and --end")

    # Load input recording
    input_path = Path(input)
    input_format = detect_file_format(input_path)

    echo_progress(f"Loading: {input_path.name}", quiet)
    echo_verbose(f"Input format: {input_format.upper()}", verbose)

    try:
        if legacy:
            echo_verbose("Using legacy NPY loader", verbose)
            recording = from_npy_legacy(input)
        else:
            recording = load_recording(input)
    except Exception as e:
        raise click.ClickException(f"Failed to load input file: {e}")

    # Get recording info.
    # 2-D data: samples are along axis 1 — presumably (channels, samples);
    # otherwise the data is treated as a flat sample array. TODO confirm layout.
    if hasattr(recording.data, "shape") and len(recording.data.shape) == 2:
        total_samples = recording.data.shape[1]
    else:
        total_samples = len(recording.data)

    echo_progress(f"Total samples: {format_sample_count(total_samples)}", quiet)

    # Determine output format (default: same as input)
    if output_format is None:
        output_format = input_format

    echo_verbose(f"Output format: {output_format.upper()}", verbose)

    # Determine output directory
    if output_dir:
        out_dir = Path(output_dir)
    else:
        out_dir = Path(".")  # Current directory

    # Get base filename for outputs
    if output_prefix:
        base_name = output_prefix
    else:
        # Get input stem without format-specific suffixes
        base_name = input_path.stem
        if base_name.endswith(".sigmf-data") or base_name.endswith(".sigmf-meta"):
            base_name = base_name[:-11]
        elif base_name.endswith(".sigmf"):
            base_name = base_name[:-6]

    # Execute operation
    if split_at is not None:
        # Split at specific sample index into exactly two parts.
        if split_at < 0 or split_at >= total_samples:
            raise click.ClickException(f"Invalid split point: {split_at}\n" f"Must be between 0 and {total_samples-1}")

        echo_progress(f"\nSplitting at sample {format_sample_count(split_at)}...", quiet)

        # Create two parts: [0, split_at) and [split_at, total_samples)
        part1 = recording.trim(start_sample=0, num_samples=split_at)
        part2 = recording.trim(start_sample=split_at, num_samples=total_samples - split_at)

        # Add metadata about original file so parts can be traced back.
        part1._metadata["original_file"] = str(input_path.name)
        part1._metadata["original_start_sample"] = 0
        part1._metadata["original_end_sample"] = split_at
        part1._metadata["split_operation"] = "split_at"

        part2._metadata["original_file"] = str(input_path.name)
        part2._metadata["original_start_sample"] = split_at
        part2._metadata["original_end_sample"] = total_samples
        part2._metadata["split_operation"] = "split_at"

        # Save parts
        ext = get_output_extension(output_format)
        output1 = out_dir / f"{base_name}_part1{ext}"
        output2 = out_dir / f"{base_name}_part2{ext}"

        echo_progress(
            f" Part 1: samples 0-{format_sample_count(split_at-1)} ({format_sample_count(split_at)} samples)", quiet
        )
        save_recording(part1, output1, output_format, overwrite, verbose)

        echo_progress(
            message=(
                f" Part 2: samples {format_sample_count(split_at)}-{format_sample_count(total_samples-1)} "
                f"({format_sample_count(total_samples - split_at)} samples)"
            ),
            quiet=quiet,
        )
        save_recording(part2, output2, output_format, overwrite, verbose)

        echo_progress("\nSaved:", quiet)
        echo_progress(f" {output1}", quiet)
        echo_progress(f" {output2}", quiet)

    elif split_every is not None or split_duration is not None:
        # Split into equal chunks (by sample count or by duration).
        if split_duration is not None:
            # Convert duration to samples (needs sample_rate in metadata).
            sample_rate = recording.metadata.get("sample_rate")
            if not sample_rate:
                raise click.ClickException(
                    "Cannot split by duration: no sample_rate in metadata\n"
                    "Use --split-every with sample count instead"
                )
            split_samples = int(split_duration * sample_rate)
            echo_progress(
                f"\nSplitting into {split_duration}s chunks ({format_sample_count(split_samples)} samples)...", quiet
            )
        else:
            split_samples = split_every
            echo_progress(f"\nSplitting into chunks of {format_sample_count(split_samples)} samples...", quiet)

        if split_samples <= 0:
            raise click.ClickException(f"Invalid chunk size: {split_samples}")

        # Calculate number of chunks (last chunk may be shorter).
        num_chunks = int(np.ceil(total_samples / split_samples))

        echo_progress(f"Creating {num_chunks} chunks...", quiet)

        # Create chunks
        ext = get_output_extension(output_format)
        # NOTE(review): created_files is appended to but never read afterwards.
        created_files = []

        for i in range(num_chunks):
            start = i * split_samples
            length = min(split_samples, total_samples - start)
            end = start + length - 1  # inclusive end index, for display only

            # Trim chunk
            chunk = recording.trim(start_sample=start, num_samples=length)

            # Add metadata
            chunk._metadata["original_file"] = str(input_path.name)
            chunk._metadata["original_start_sample"] = start
            chunk._metadata["original_end_sample"] = start + length
            chunk._metadata["split_operation"] = "split_every"
            chunk._metadata["chunk_index"] = i + 1
            chunk._metadata["total_chunks"] = num_chunks

            # Generate output filename; zero-pad the chunk number so file
            # listings sort in chunk order.
            chunk_num = str(i + 1).zfill(len(str(num_chunks)))
            output_path = out_dir / f"{base_name}_chunk{chunk_num}{ext}"

            echo_progress(
                f" Chunk {i+1}/{num_chunks}: samples {format_sample_count(start)}-{format_sample_count(end)}...",
                quiet,
            )
            save_recording(chunk, output_path, output_format, overwrite, verbose)
            created_files.append(output_path)

        echo_progress(f"\nCreated {num_chunks} chunks in {out_dir}/", quiet)

    elif trim:
        # Trim operation: extract [start_sample, start_sample + num_samples).
        if end_sample is not None:
            if end_sample <= start_sample:
                raise click.ClickException(
                    f"Invalid range: end ({end_sample}) must be greater than start ({start_sample})"
                )
            # Convert --end into a length so the rest of the branch is uniform.
            num_samples = end_sample - start_sample

        if start_sample < 0 or num_samples < 0:
            raise click.ClickException("Invalid trim range: start and length must be non-negative")

        if start_sample + num_samples > total_samples:
            raise click.ClickException(
                f"Invalid trim range\n"
                f"Start: {format_sample_count(start_sample)}, Length: {format_sample_count(num_samples)}, "
                f"End: {format_sample_count(start_sample + num_samples)}\n"
                f"Recording only has {format_sample_count(total_samples)} samples "
                f"(indices 0-{format_sample_count(total_samples-1)})"
            )

        echo_progress("\nTrimming recording...", quiet)
        echo_progress(f" Start: {format_sample_count(start_sample)}", quiet)
        echo_progress(f" Length: {format_sample_count(num_samples)} samples", quiet)
        echo_progress(f" End: {format_sample_count(start_sample + num_samples - 1)}", quiet)

        # Trim recording
        trimmed = recording.trim(start_sample=start_sample, num_samples=num_samples)

        # Add metadata
        trimmed._metadata["original_file"] = str(input_path.name)
        trimmed._metadata["original_start_sample"] = start_sample
        trimmed._metadata["original_end_sample"] = start_sample + num_samples
        trimmed._metadata["split_operation"] = "trim"

        # Save trimmed recording
        ext = get_output_extension(output_format)
        output_path = out_dir / f"{base_name}{ext}"

        save_recording(trimmed, output_path, output_format, overwrite, verbose)

        echo_progress(f"\nOutput: {output_path}", quiet)
        echo_progress("Done.", quiet)

    elif extract_annotations:
        # Extract annotated regions into one file per annotation.
        if not recording.annotations:
            raise click.ClickException(
                "No annotations found in recording\n" "Use 'utils annotate' to add annotations first"
            )

        # Filter annotations: first by index, then by label.
        annotations_to_extract = recording.annotations

        if annotation_index is not None:
            if annotation_index < 0 or annotation_index >= len(annotations_to_extract):
                raise click.ClickException(
                    f"Invalid annotation index: {annotation_index}\n"
                    f"Recording has {len(annotations_to_extract)} annotations "
                    f"(indices 0-{len(annotations_to_extract)-1})"
                )
            annotations_to_extract = [annotations_to_extract[annotation_index]]

        if annotation_label is not None:
            filtered = [ann for ann in annotations_to_extract if ann.label == annotation_label]
            if not filtered:
                available_labels = list(set(ann.label for ann in recording.annotations))
                raise click.ClickException(
                    f"No annotations with label '{annotation_label}'\n"
                    f"Available labels: {', '.join(available_labels)}"
                )
            annotations_to_extract = filtered

        echo_progress(f"\nExtracting {len(annotations_to_extract)} annotated region(s)...", quiet)

        # Extract each annotation
        ext = get_output_extension(output_format)
        # NOTE(review): created_files is appended to but never read afterwards.
        created_files = []

        for ann in annotations_to_extract:
            # Get annotation bounds
            start = ann.sample_start
            count = ann.sample_count
            end = start + count - 1  # inclusive end index, for display only

            # Trim to annotation bounds
            chunk = recording.trim(start_sample=start, num_samples=count)

            # Clear annotations - the trimmed chunk IS the annotation,
            # and trim() may produce invalid annotations
            chunk._annotations = []

            # Add metadata
            chunk._metadata["original_file"] = str(input_path.name)
            chunk._metadata["original_start_sample"] = start
            chunk._metadata["original_end_sample"] = start + count
            chunk._metadata["split_operation"] = "extract_annotation"
            chunk._metadata["annotation_label"] = ann.label

            # Generate filename (sanitize label for filesystem use)
            label_safe = ann.label.replace(" ", "_").replace("/", "_")
            output_filename = f"{base_name}_{label_safe}_{start}-{start+count}{ext}"
            output_path = out_dir / output_filename

            # Get original index in full annotation list if we filtered.
            # NOTE(review): list.index returns the first equal annotation, so
            # the displayed index can be wrong when duplicate annotations
            # exist — confirm whether that matters for this display.
            if annotation_index is not None:
                display_idx = annotation_index
            else:
                display_idx = recording.annotations.index(ann)

            echo_progress(
                message=(
                    f" [{display_idx}] {ann.label} ({format_sample_count(start)}"
                    f"-{format_sample_count(end)}): {output_filename}"
                ),
                quiet=quiet,
            )
            save_recording(chunk, output_path, output_format, overwrite, verbose)
            created_files.append(output_path)

        echo_progress(f"\nExtracted {len(annotations_to_extract)} annotated region(s).", quiet)


if __name__ == "__main__":
    split()
|
||||||
732
ria_toolkit_oss_cli/ria_toolkit_oss/transform.py
Normal file
732
ria_toolkit_oss_cli/ria_toolkit_oss/transform.py
Normal file
|
|
@ -0,0 +1,732 @@
|
||||||
|
"""Transform command - Apply signal transformations to recordings."""
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
import importlib.util
|
||||||
|
import inspect
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils.data.recording import Recording
|
||||||
|
from utils.io.recording import load_recording
|
||||||
|
from utils.transforms import iq_augmentations, iq_channel_models, iq_impairments
|
||||||
|
from utils_cli.utils.common import (
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_sample_count,
|
||||||
|
save_recording,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_available_transforms(module):
    """Collect the public functions of *module*, keyed by name.

    Args:
        module: Python module to inspect.

    Returns:
        dict: Mapping of public function name -> function object (names
        starting with an underscore are excluded).
    """
    return {
        name: func
        for name, func in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_")
    }
|
||||||
|
|
||||||
|
|
||||||
|
def get_transform_help(func):
    """Extract help info from a transform function.

    Args:
        func: Transform function to inspect

    Returns:
        dict: {description, full_doc, params} where ``params`` maps each
        parameter name (excluding ``signal``) to its type/default metadata.
    """
    sig = inspect.signature(func)
    doc = inspect.getdoc(func) or ""

    # First line of the docstring serves as the short description.
    description = doc.split("\n")[0] if doc else "No description"

    # Extract parameters from signature (skip 'signal' — it is the data
    # argument supplied by the CLI, not a user-tunable parameter).
    params = {}
    for param_name, param in sig.parameters.items():
        if param_name == "signal":
            continue

        default = param.default
        # Compare against the sentinel with `is not`, never `!=`: a default
        # with a custom __eq__ (e.g. a numpy array) could raise or return a
        # non-bool from an equality comparison.
        has_default = default is not inspect.Parameter.empty
        param_type = "optional" if has_default else "required"
        default_str = f" (default: {default})" if has_default else ""

        params[param_name] = {
            "type": param_type,
            "default": default,
            "annotation": str(param.annotation) if param.annotation is not inspect.Parameter.empty else "any",
            "display": f"{param_name} ({param_type}){default_str}",
        }

    return {"description": description, "full_doc": doc, "params": params}
|
||||||
|
|
||||||
|
|
||||||
|
def show_transform_help(transform_name, func):
    """Print a compact parameter summary for one transform to the console."""
    info = get_transform_help(func)

    # Header: name, rule, then the one-line description.
    click.echo(f"\n{transform_name}")
    click.echo("-" * 50)
    click.echo(info["description"])

    params = info["params"]
    if params:
        click.echo("\nParameters:")
        for param_name in sorted(params):
            click.echo(f" {param_name:20} {params[param_name]['display']}")

    click.echo()
|
||||||
|
|
||||||
|
|
||||||
|
def quick_view_transform(recording, output_path, title="Transform Result"):
    """Create a quick PNG visualization of transformed recording using constellation plot."""
    try:
        from utils.view.view_signal_simple import view_simple_sig

        # Write the preview PNG next to the transformed output file.
        out = Path(output_path)
        png_path = out.parent / f"{out.stem}_preview.png"

        # Simple constellation view, saved rather than shown interactively.
        view_simple_sig(recording, output_path=str(png_path), constellation_mode=True, title=title, saveplot=True)

        click.echo(f"Visualization saved to: {png_path}")
    except Exception as e:
        # Visualization is best-effort; a failure must not abort the transform.
        click.echo(f"Warning: Could not create visualization: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def generate_transform_suffix(transform_name, params):
    """Generate a short suffix for the output filename based on transform and params.

    Args:
        transform_name: Name of the transform
        params: Dict of parameters

    Returns:
        str: A short suffix like "awgn15" or "freqoffset10k"
    """

    def _trim(text):
        # Strip trailing zeros, then a trailing dot: "1.50" -> "1.5", "2.00" -> "2".
        return text.rstrip("0").rstrip(".")

    def _hz(value):
        # Human-scale a frequency: MHz, kHz, or plain Hz, rounded to integers.
        if abs(value) >= 1e6:
            return f"{value/1e6:.0f}m"
        if abs(value) >= 1e3:
            return f"{value/1e3:.0f}k"
        return f"{value:.0f}"

    suffix = transform_name.replace("_", "")

    # Only the first matching key contributes — the chain below encodes the
    # priority order of the recognized parameters.
    if "snr_db" in params:
        suffix += f"{int(params['snr_db'])}"
    elif "snr" in params:
        suffix += f"{int(params['snr'])}"
    elif "amplitude_variance" in params:
        suffix += f"{int(params['amplitude_variance']*100)}av"
    elif "phase_variance" in params:
        suffix += f"{int(params['phase_variance']*100000)}pv"
    elif "compression_gain" in params:
        suffix += _trim(f"{params['compression_gain']:.2f}")
    elif "offset_hz" in params:
        suffix += _hz(params["offset_hz"])
    elif "offset" in params:
        suffix += _trim(f"{params['offset']:.2f}")
    elif "doppler_hz" in params:
        suffix += f"{params['doppler_hz']:.0f}"

    return suffix
|
||||||
|
|
||||||
|
|
||||||
|
def parse_transform_params(param_strings):
    """Parse transform parameters from CLI options.

    Args:
        param_strings: List of 'KEY=VALUE' strings

    Returns:
        dict: {key: value} with int/float types inferred where possible
    """
    parsed = {}

    # A falsy input (None or empty) simply yields an empty dict.
    for item in param_strings or ():
        if "=" not in item:
            raise click.BadParameter(f"Parameter must be KEY=VALUE, got: {item}")

        key, _, raw = item.partition("=")
        key, raw = key.strip(), raw.strip()

        # Infer numeric types: anything containing '.' or an exponent marker
        # is tried as float, otherwise as int; on failure the string is kept.
        looks_float = "e" in raw.lower() or "." in raw
        try:
            parsed[key] = float(raw) if looks_float else int(raw)
        except ValueError:
            parsed[key] = raw

    return parsed
|
||||||
|
|
||||||
|
|
||||||
|
def load_custom_transforms(transform_dir):
    """Load custom transform functions from a directory.

    Args:
        transform_dir: Path to directory containing .py files with transform functions

    Returns:
        dict: {transform_name: function} for all public functions in all .py files

    Raises:
        click.ClickException: If directory doesn't exist or no transforms found
    """
    transform_dir = Path(transform_dir)

    # Guard clauses: fail fast on an unusable directory.
    if not transform_dir.exists():
        raise click.ClickException(f"Transform directory does not exist: {transform_dir}")
    if not transform_dir.is_dir():
        raise click.ClickException(f"Path is not a directory: {transform_dir}")

    py_files = list(transform_dir.glob("*.py"))
    if not py_files:
        raise click.ClickException(f"No .py files found in {transform_dir}")

    transforms = {}
    for py_file in py_files:
        try:
            # Import each file as a throwaway, file-located module.
            spec = importlib.util.spec_from_file_location(py_file.stem, py_file)
            if spec is None or spec.loader is None:
                click.echo(f"Warning: Could not load {py_file.name}")
                continue

            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            # Keep every public function, remembering which file provided it
            # so listings and metadata can report the source.
            for name, func in inspect.getmembers(module, inspect.isfunction):
                if name.startswith("_"):
                    continue
                func._transform_source_file = py_file.name
                transforms[name] = func
        except Exception as e:
            raise click.ClickException(f"Failed to load {py_file.name}: {e}")

    return transforms
|
||||||
|
|
||||||
|
|
||||||
|
def check_input_errors(item_name: str, item, available, input, help_transform):
    """Validate the shared positional arguments of the transform subcommands.

    Raises a click.UsageError when the transform name or INPUT is missing,
    and a click.ClickException when the name is not in *available*.
    """
    if item is None:
        # Tailor the hint to how the user got here.
        if help_transform:
            raise click.UsageError(f"{item_name.upper()} must be specified for --help-transform")
        raise click.UsageError(f"{item_name.upper()} must be specified (or use --list)")

    if item not in available:
        raise click.ClickException(f"Unknown {item_name}: {item}\n" f"Use --list to see available options")

    # INPUT is only optional when the user just wants parameter help.
    if input is None and not help_transform:
        raise click.UsageError("INPUT must be specified")
|
||||||
|
|
||||||
|
|
||||||
|
def load_input(input, verbose):
    """Load the recording at *input*; abort with a ClickException on failure."""
    try:
        rec = load_recording(input)
    except Exception as err:
        raise click.ClickException(f"Failed to load input: {err}")
    else:
        # Report the sample count only in verbose mode.
        echo_verbose(f"Loaded {format_sample_count(rec.data.shape[-1])} samples", verbose)
        return rec
|
||||||
|
|
||||||
|
# Top-level `transform` group: subcommands (augment/impair/apply_channel/custom)
# are registered on it via @transform.command below. The docstring doubles as
# the CLI help text, so it is left untouched.
@click.group()
def transform():
    """Apply signal transformations to recordings.

    Transform supports three categories of operations:
    - augment: Modify signal to create new ML examples
    - impair: Degrade signal with noise, distortion, etc.
    - apply_channel: Apply channel models (fading, Doppler, etc.)

    Each operation is applied independently. Chain multiple transforms by
    running this command multiple times.

    \b
    Examples:
    # List available augmentations
    utils transform augment --list
    \b
    # Apply channel swap
    utils transform augment channel_swap input.npy
    \b
    # Apply AWGN impairment
    utils transform impair awgn input.npy --snr-db 15
    \b
    # Apply Rayleigh fading channel
    utils transform apply_channel rayleigh input.npy --num-paths 5
    """
    # The group itself holds no state; it exists only to namespace subcommands.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
# CLI: `utils transform augment NAME INPUT [OUTPUT]`. Docstring is user-facing
# click help text and is left byte-identical.
@transform.command(name="augment")
@click.argument("augmentation", required=False)
@click.argument("input", type=click.Path(exists=True), required=False)
@click.argument("output", type=click.Path(), required=False)
@click.option("--list", "list_transforms", is_flag=True, help="List available augmentations")
@click.option("--help-transform", is_flag=True, help="Show parameters for this augmentation")
@click.option("--params", multiple=True, help="Transform parameters as KEY=VALUE (can be repeated)")
@click.option("--view", is_flag=True, help="Save visualization PNG with constellation plot")
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def augment(augmentation, input, output, list_transforms, help_transform, params, view, overwrite, verbose, quiet):
    """Apply augmentation transforms to recordings.

    Augmentations modify signals to create new training examples without
    degrading quality (e.g., channel swap, time reversal, quantization).

    Examples:

    # List all augmentations
    \b
    utils transform augment --list

    # Show parameters for an augmentation
    \b
    utils transform augment channel_swap --help-transform

    # Apply augmentation
    \b
    utils transform augment channel_swap input.npy

    # Apply with parameters and save visualization
    \b
    utils transform augment drop_samples input.npy --params max_section_size=5 --view
    """
    # Discover the public functions exposed by the augmentations module.
    available = get_available_transforms(iq_augmentations)

    # --list: print name + first docstring line for each transform, then exit.
    if list_transforms:
        click.echo("Available augmentations:")
        for name in sorted(available.keys()):
            func = available[name]
            docstring = (func.__doc__ or "").split("\n")[0].strip()
            click.echo(f" {name:30} {docstring}")
        return

    # --help-transform: validate the name, show its parameters, then exit.
    if help_transform:
        check_input_errors("augmentation", augmentation, available, input, help_transform)
        show_transform_help(augmentation, available[augmentation])
        return

    check_input_errors("augmentation", augmentation, available, input, help_transform)

    # Generate output filename if not provided (input stem + transform suffix).
    if output is None:
        input_path = Path(input)
        input_stem = input_path.stem
        ext = input_path.suffix
        suffix = generate_transform_suffix(augmentation, parse_transform_params(params))
        output = str(input_path.parent / f"{input_stem}_{suffix}{ext}")
        echo_verbose(f"Auto-generated output: {output}", verbose)

    # Check if output exists
    if not overwrite and Path(output).exists():
        raise click.ClickException(f"Output file '{output}' already exists\n" f"Use --overwrite to replace")

    echo_progress(f"Augmenting: {os.path.basename(input)} → {os.path.basename(output)}", quiet)
    echo_verbose(f"Transform: {augmentation}", verbose)

    # Load input
    recording = load_input(input, verbose)

    # Parse and apply transform. NOTE(review): the Recording object itself is
    # passed as the transform's first argument.
    try:
        transform_func = available[augmentation]
        transform_params = parse_transform_params(params)
        echo_verbose(f"Parameters: {transform_params}", verbose)

        result = transform_func(recording, **transform_params)
    except Exception as e:
        raise click.ClickException(f"Transform failed: {e}")

    # Track transform in metadata (Recording.metadata is a property that returns a copy)
    # So we need to work with a copy and create a new Recording with updated metadata
    updated_metadata = result.metadata.copy()
    if "transforms_applied" not in updated_metadata:
        updated_metadata["transforms_applied"] = []

    # (params are re-parsed here; the result is identical to transform_params above)
    updated_metadata["transforms_applied"].append(
        {"type": "augment", "name": augmentation, "params": parse_transform_params(params)}
    )

    # Create new recording with updated metadata
    result = Recording(data=result.data, metadata=updated_metadata, annotations=result.annotations)

    # Save output
    try:
        save_recording(result, output, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")

    # Optional: Create visualization (best-effort; failures only warn).
    if view:
        echo_verbose("Creating visualization...", verbose)
        quick_view_transform(result, output, title=f"{augmentation.replace('_', ' ').title()} - {Path(output).name}")
|
||||||
|
|
||||||
|
|
||||||
|
# CLI: `utils transform impair NAME INPUT [OUTPUT]`. Structure mirrors the
# `augment` command but draws transforms from the impairments module.
# Docstring is user-facing click help text and is left byte-identical.
@transform.command(name="impair")
@click.argument("impairment", required=False)
@click.argument("input", type=click.Path(exists=True), required=False)
@click.argument("output", type=click.Path(), required=False)
@click.option("--list", "list_transforms", is_flag=True, help="List available impairments")
@click.option("--help-transform", is_flag=True, help="Show parameters for this impairment")
@click.option("--params", multiple=True, help="Transform parameters as KEY=VALUE (can be repeated)")
@click.option("--view", is_flag=True, help="Save visualization PNG with constellation plot")
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def impair(impairment, input, output, list_transforms, help_transform, params, view, overwrite, verbose, quiet):
    """Apply impairment transforms to recordings.

    Impairments degrade signals by adding noise, distortion, and other
    channel effects (e.g., AWGN, phase noise, IQ imbalance).

    Examples:

    # List all impairments
    \b
    utils transform impair --list

    # Show parameters for an impairment
    \b
    utils transform impair add_awgn_to_signal --help-transform

    # Apply impairment
    \b
    utils transform impair add_awgn_to_signal input.npy --params snr=10

    # Apply with visualization
    \b
    utils transform impair add_phase_noise input.npy --params phase_variance=0.001 --view
    """
    # Discover the public functions exposed by the impairments module.
    available = get_available_transforms(iq_impairments)

    # --list: print name + first docstring line for each transform, then exit.
    if list_transforms:
        click.echo("Available impairments:")
        for name in sorted(available.keys()):
            func = available[name]
            docstring = (func.__doc__ or "").split("\n")[0].strip()
            click.echo(f" {name:30} {docstring}")
        return

    # --help-transform: validate the name, show its parameters, then exit.
    if help_transform:
        check_input_errors("impairment", impairment, available, input, help_transform)
        show_transform_help(impairment, available[impairment])
        return

    check_input_errors("impairment", impairment, available, input, help_transform)

    # Generate output filename if not provided (input stem + transform suffix).
    if output is None:
        input_path = Path(input)
        input_stem = input_path.stem
        ext = input_path.suffix
        suffix = generate_transform_suffix(impairment, parse_transform_params(params))
        output = str(input_path.parent / f"{input_stem}_{suffix}{ext}")
        echo_verbose(f"Auto-generated output: {output}", verbose)

    # Check if output exists
    if not overwrite and Path(output).exists():
        raise click.ClickException(f"Output file '{output}' already exists\n" f"Use --overwrite to replace")

    echo_progress(f"Impairing: {os.path.basename(input)} → {os.path.basename(output)}", quiet)
    echo_verbose(f"Transform: {impairment}", verbose)

    # Load input
    recording = load_input(input, verbose)

    # Parse and apply transform
    try:
        transform_func = available[impairment]
        transform_params = parse_transform_params(params)
        echo_verbose(f"Parameters: {transform_params}", verbose)

        result = transform_func(recording, **transform_params)
    except Exception as e:
        raise click.ClickException(f"Transform failed: {e}")

    # Track transform in metadata (Recording.metadata is a property that returns a copy)
    updated_metadata = result.metadata.copy()
    if "transforms_applied" not in updated_metadata:
        updated_metadata["transforms_applied"] = []

    # (params are re-parsed here; the result is identical to transform_params above)
    updated_metadata["transforms_applied"].append(
        {"type": "impair", "name": impairment, "params": parse_transform_params(params)}
    )

    # Create new recording with updated metadata
    result = Recording(data=result.data, metadata=updated_metadata, annotations=result.annotations)

    # Save output
    try:
        save_recording(result, output, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")

    # Optional: Create visualization (best-effort; failures only warn).
    if view:
        echo_verbose("Creating visualization...", verbose)
        quick_view_transform(result, output, title=f"{impairment.replace('_', ' ').title()} - {Path(output).name}")
|
||||||
|
|
||||||
|
|
||||||
|
# CLI: `utils transform apply_channel NAME INPUT [OUTPUT]`. Structure mirrors
# `augment`/`impair` but draws transforms from the channel-models module.
# Docstring is user-facing click help text and is left byte-identical.
@transform.command(name="apply_channel")
@click.argument("channel_model", required=False)
@click.argument("input", type=click.Path(exists=True), required=False)
@click.argument("output", type=click.Path(), required=False)
@click.option("--list", "list_transforms", is_flag=True, help="List available channel models")
@click.option("--help-transform", is_flag=True, help="Show parameters for this channel model")
@click.option("--params", multiple=True, help="Transform parameters as KEY=VALUE (can be repeated)")
@click.option("--view", is_flag=True, help="Save visualization PNG with constellation plot")
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def apply_channel(
    channel_model, input, output, list_transforms, help_transform, params, view, overwrite, verbose, quiet
):
    """Apply channel models to recordings.

    Channel models simulate RF propagation effects like fading, Doppler shift,
    and multipath reflections.

    Use --list to see available channel models and their parameters.

    \b
    Examples:
        utils transform apply_channel rayleigh_fading_channel input.npy --params num_paths=3 snr_db=15

    \b
        utils transform apply_channel doppler_channel recordings/input.npy \\
            --params satellite_velocity=7500 \\
            --params satellite_initial_distance=400000 \\
            --params frequency=1e9 \\
            --params sample_rate=2e6
    """
    # Discover the public functions exposed by the channel-models module.
    available = get_available_transforms(iq_channel_models)

    # --list: print name + first docstring line for each model, then exit.
    if list_transforms:
        click.echo("Available channel models:")
        for name in sorted(available.keys()):
            func = available[name]
            docstring = (func.__doc__ or "").split("\n")[0].strip()
            click.echo(f" {name:30} {docstring}")
        return

    # --help-transform: validate the name, show its parameters, then exit.
    if help_transform:
        check_input_errors("channel_model", channel_model, available, input, help_transform)
        show_transform_help(channel_model, available[channel_model])
        return

    check_input_errors("channel_model", channel_model, available, input, help_transform)

    # Generate output filename if not provided (input stem + transform suffix).
    if output is None:
        input_path = Path(input)
        input_stem = input_path.stem
        ext = input_path.suffix
        suffix = generate_transform_suffix(channel_model, parse_transform_params(params))
        output = str(input_path.parent / f"{input_stem}_{suffix}{ext}")
        echo_verbose(f"Auto-generated output: {output}", verbose)

    # Check if output exists
    if not overwrite and Path(output).exists():
        raise click.ClickException(f"Output file '{output}' already exists\n" f"Use --overwrite to replace")

    echo_progress(f"Applying channel: {os.path.basename(input)} → {os.path.basename(output)}", quiet)
    echo_verbose(f"Channel model: {channel_model}", verbose)

    # Load input
    recording = load_input(input, verbose)

    # Parse and apply transform
    try:
        transform_func = available[channel_model]
        transform_params = parse_transform_params(params)
        echo_verbose(f"Parameters: {transform_params}", verbose)

        result = transform_func(recording, **transform_params)
    except Exception as e:
        raise click.ClickException(f"Transform failed: {e}")

    # Track transform in metadata (Recording.metadata is a property that returns a copy)
    updated_metadata = result.metadata.copy()
    if "transforms_applied" not in updated_metadata:
        updated_metadata["transforms_applied"] = []

    # (params are re-parsed here; the result is identical to transform_params above)
    updated_metadata["transforms_applied"].append(
        {"type": "channel", "name": channel_model, "params": parse_transform_params(params)}
    )

    # Create new recording with updated metadata
    result = Recording(data=result.data, metadata=updated_metadata, annotations=result.annotations)

    # Save output
    try:
        save_recording(result, output, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")

    # Optional: Create visualization (best-effort; failures only warn).
    if view:
        echo_verbose("Creating visualization...", verbose)
        quick_view_transform(result, output, title=f"{channel_model.replace('_', ' ').title()} - {Path(output).name}")
|
||||||
|
|
||||||
|
|
||||||
|
# CLI: `utils transform custom NAME INPUT [OUTPUT] --transform-dir DIR`.
# Same pipeline as the other subcommands, but the transform registry is built
# dynamically from user-supplied .py files instead of a library module.
# Docstring is user-facing click help text and is left byte-identical.
@transform.command(name="custom")
@click.argument("transform_name", required=False)
@click.argument("input", type=click.Path(exists=True), required=False)
@click.argument("output", type=click.Path(), required=False)
@click.option(
    "--transform-dir",
    type=click.Path(exists=True),
    required=True,
    help="Path to directory containing custom transform .py files",
)
@click.option("--list", "list_transforms", is_flag=True, help="List available custom transforms")
@click.option("--help-transform", is_flag=True, help="Show parameters for this transform")
@click.option("--params", multiple=True, help="Transform parameters as KEY=VALUE (can be repeated)")
@click.option("--view", is_flag=True, help="Save visualization PNG with constellation plot")
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def custom(
    transform_name,
    input,
    output,
    transform_dir,
    list_transforms,
    help_transform,
    params,
    view,
    overwrite,
    verbose,
    quiet,
):
    """Apply custom user-defined transforms to recordings.

    Custom transforms are Python functions loaded from user-specified directory.
    Each .py file in the directory is scanned for public functions that can be used.

    Transform functions must have signature:
        def my_transform(signal, **kwargs) -> signal_or_recording
    where signal is a complex CxN array or Recording object.

    Examples:

    # List all custom transforms in directory
    \b
    utils transform custom --transform-dir ~/my_transforms --list

    # Show parameters for a transform
    \b
    utils transform custom my_filter --transform-dir ~/my_transforms --help-transform

    # Apply custom transform
    \b
    utils transform custom my_filter input.npy --transform-dir ~/my_transforms

    # With parameters and visualization
    \b
    utils transform custom my_filter input.npy --transform-dir ~/my_transforms \\
        --params cutoff_freq=5000 order=4 --view
    """
    # NOTE(review): this try/except re-raises the same exception unchanged and
    # is therefore a no-op wrapper; a plain call would behave identically.
    try:
        available = load_custom_transforms(transform_dir)
    except click.ClickException:
        raise

    # --list: include the originating file for each transform.
    if list_transforms:
        click.echo(f"Available custom transforms in {transform_dir}:")
        for name in sorted(available.keys()):
            func = available[name]
            source_file = getattr(func, "_transform_source_file", "unknown")
            docstring = (func.__doc__ or "").split("\n")[0].strip()
            click.echo(f" {name:30} {docstring:40} [{source_file}]")
        return

    # --help-transform: validate the name, show its parameters, then exit.
    if help_transform:
        check_input_errors("transform_name", transform_name, available, input, help_transform)
        show_transform_help(transform_name, available[transform_name])
        return

    check_input_errors("transform_name", transform_name, available, input, help_transform)

    # Generate output filename if not provided (input stem + transform suffix).
    if output is None:
        input_path = Path(input)
        input_stem = input_path.stem
        ext = input_path.suffix
        suffix = generate_transform_suffix(transform_name, parse_transform_params(params))
        output = str(input_path.parent / f"{input_stem}_{suffix}{ext}")
        echo_verbose(f"Auto-generated output: {output}", verbose)

    # Check if output exists
    if not overwrite and Path(output).exists():
        raise click.ClickException(f"Output file '{output}' already exists\n" f"Use --overwrite to replace")

    echo_progress(f"Applying custom: {os.path.basename(input)} → {os.path.basename(output)}", quiet)
    echo_verbose(f"Transform: {transform_name}", verbose)

    # Load input
    recording = load_input(input, verbose)

    # Parse and apply transform. NOTE(review): as in the other subcommands the
    # Recording object is passed as the first argument, while the docstring
    # above says transforms take a raw signal — confirm custom transforms
    # accept either.
    try:
        transform_func = available[transform_name]
        transform_params = parse_transform_params(params)
        echo_verbose(f"Parameters: {transform_params}", verbose)

        result = transform_func(recording, **transform_params)
    except Exception as e:
        raise click.ClickException(f"Transform failed: {e}")

    # Track transform in metadata
    updated_metadata = result.metadata.copy()
    if "transforms_applied" not in updated_metadata:
        updated_metadata["transforms_applied"] = []

    # (params are re-parsed here; the result is identical to transform_params above)
    updated_metadata["transforms_applied"].append(
        {
            "type": "custom",
            "name": transform_name,
            "source_file": getattr(available[transform_name], "_transform_source_file", "unknown"),
            "params": parse_transform_params(params),
        }
    )

    # Create new recording with updated metadata
    result = Recording(data=result.data, metadata=updated_metadata, annotations=result.annotations)

    # Save output
    try:
        save_recording(result, output, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")

    # Optional: Create visualization (best-effort; failures only warn).
    if view:
        echo_verbose("Creating visualization...", verbose)
        quick_view_transform(result, output, title=f"{transform_name.replace('_', ' ').title()} - {Path(output).name}")
|
||||||
499
ria_toolkit_oss_cli/ria_toolkit_oss/transmit.py
Normal file
499
ria_toolkit_oss_cli/ria_toolkit_oss/transmit.py
Normal file
|
|
@ -0,0 +1,499 @@
|
||||||
|
"""Transmit command for SDR devices."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import signal
|
||||||
|
import time
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils.data import Recording
|
||||||
|
from utils.io import from_npy_legacy, load_recording
|
||||||
|
|
||||||
|
from .common import (
|
||||||
|
echo_progress,
|
||||||
|
echo_verbose,
|
||||||
|
format_frequency,
|
||||||
|
format_sample_rate,
|
||||||
|
get_sdr_device,
|
||||||
|
load_yaml_config,
|
||||||
|
parse_frequency,
|
||||||
|
)
|
||||||
|
from .discover import (
|
||||||
|
find_bladerf_devices,
|
||||||
|
find_hackrf_devices,
|
||||||
|
find_pluto_devices,
|
||||||
|
find_uhd_devices,
|
||||||
|
load_sdr_drivers,
|
||||||
|
)
|
||||||
|
|
||||||
|
# TX-capable devices (RTL-SDR and ThinkRF are RX-only)
|
||||||
|
TX_CAPABLE_DEVICES = ["pluto", "hackrf", "bladerf", "usrp"]
|
||||||
|
|
||||||
|
|
||||||
|
def auto_select_tx_device(quiet: bool = False) -> str:
    """
    Auto-select a TX-capable device when exactly one is connected.

    Enumerates USRP, Pluto, HackRF, and BladeRF devices (RTL-SDR and
    ThinkRF are RX-only and deliberately not probed) and maps the detected
    hardware name to the internal device-type key used by --device.

    Args:
        quiet: Suppress the "auto-detected" warning printed to stderr.

    Returns:
        Device type string ("pluto", "hackrf", "bladerf", or "usrp").

    Raises:
        click.ClickException: If no TX devices or multiple devices found.
    """
    # Load drivers and collect TX-capable devices only
    load_sdr_drivers(verbose=False)

    tx_devices = []
    tx_devices.extend(find_uhd_devices())
    tx_devices.extend(find_pluto_devices())
    tx_devices.extend(find_hackrf_devices())
    tx_devices.extend(find_bladerf_devices())
    # Note: RTL-SDR and ThinkRF excluded (RX-only)

    if len(tx_devices) == 0:
        raise click.ClickException(
            "No TX-capable SDR devices found.\n"
            "TX-capable devices: PlutoSDR, HackRF, BladeRF, USRP\n"
            "Run 'utils discover' to see all devices."
        )

    elif len(tx_devices) == 1:
        device = tx_devices[0]
        # Normalize the reported hardware name (e.g. "HackRF One" -> "hackrfone")
        device_type = device.get("type", "Unknown").lower().replace("-", "").replace(" ", "")

        # Map device type names to internal names
        type_map = {
            "plutosdr": "pluto",
            "hackrf": "hackrf",
            "hackrfone": "hackrf",
            "bladerf": "bladerf",
            "usrp": "usrp",
            "b200": "usrp",
            "b210": "usrp",
        }

        # Unknown hardware names pass through unchanged
        device_type = type_map.get(device_type, device_type)

        if not quiet:
            click.echo(
                click.style("Warning: ", fg="yellow")
                + f"No device specified. Auto-detected {device.get('type', 'Unknown')}",
                err=True,
            )
            click.echo(f"Use --device {device_type} to suppress this warning.\n", err=True)

        return device_type

    else:
        device_list = "\n".join(f" - {d.get('type', 'Unknown')}" for d in tx_devices)
        raise click.ClickException(
            f"Multiple TX-capable devices found. Specify with --device\n\n"
            f"Available TX devices:\n{device_list}\n\n"
            f"Run 'utils discover' for more details."
        )
|
||||||
|
|
||||||
|
|
||||||
|
def load_input_file(input_file: str, legacy: bool = False, quiet: bool = False) -> "Recording":
    """
    Load a recording from file with automatic format detection.

    Args:
        input_file: Path to the input file (.sigmf, .npy, .wav, or .blue).
        legacy: Use the legacy NPY loader instead of auto-detection.
        quiet: Suppress progress messages (defaults to False, the previous
            hard-coded behavior, so existing callers are unchanged).

    Returns:
        Recording object.

    Raises:
        click.ClickException: If the file does not exist or cannot be loaded.
    """
    if not os.path.exists(input_file):
        raise click.ClickException(f"Input file not found: {input_file}")

    try:
        if legacy:
            echo_progress("Loading legacy NPY file...", quiet=quiet)
            recording = from_npy_legacy(input_file)
        else:
            echo_progress("Loading input file...", quiet=quiet)
            recording = load_recording(input_file)

        return recording

    except Exception as e:
        # Broad catch is deliberate: any loader failure becomes one
        # user-facing CLI error with format hints. Chain the cause so
        # tracebacks stay debuggable.
        raise click.ClickException(
            f"Could not load '{input_file}': {e}\n"
            f"Supported formats: .sigmf, .npy, .wav, .blue\n"
            f"Use --legacy for old NPY format files"
        ) from e
|
||||||
|
|
||||||
|
|
||||||
|
def select_params(device, sample_rate, gain, bandwidth, quiet, verbose):
    """
    Resolve the TX device and fill in device-specific parameter defaults.

    Args:
        device: Device type key, or None to auto-detect the single connected
            TX-capable device.
        sample_rate: Sample rate in Hz, or None for the device default.
        gain: TX gain in dB, or None for a conservative device default.
        bandwidth: Bandwidth in Hz, or None to match the sample rate.
        quiet: Suppress auto-detection warnings.
        verbose: Emit messages about applied defaults.

    Returns:
        Tuple (device, sample_rate, gain, bandwidth) with defaults applied.
    """
    # Auto-select device if not specified
    if device is None:
        device = auto_select_tx_device(quiet)

    # TX sample-rate defaults (same as RX): safe rates below each device's
    # hardware maximum. Unknown devices fall back to 20 MHz.
    device_sample_rates = {
        "pluto": 20e6,  # PlutoSDR up to 61 MHz, 20 MHz safe
        "hackrf": 20e6,  # HackRF up to 20 MHz
        "bladerf": 40e6,  # BladeRF up to 61 MHz, 40 MHz safe
        "usrp": 50e6,  # USRP up to 200 MHz, 50 MHz default
    }
    if sample_rate is None:
        sample_rate = device_sample_rates.get(device, 20e6)

    if gain is None:
        # TX gain defaults (conservative for ISM band to avoid interference)
        default_tx_gains = {
            "pluto": -20,  # PlutoSDR: -20 dB (safe, low power)
            "hackrf": 0,  # HackRF: 0 dB (moderate)
            "bladerf": -10,  # BladeRF: -10 dB (conservative)
            "usrp": -10,  # USRP: -10 dB (conservative)
        }
        gain = default_tx_gains.get(device, -10)
        echo_verbose(f"Using default TX gain: {gain} dB for {device}", verbose)

    # Default bandwidth simply matches the sample rate for every known
    # device (the per-device table was redundant); unknown devices keep
    # bandwidth=None, preserving the original behavior.
    if bandwidth is None and device in device_sample_rates:
        bandwidth = sample_rate

    return device, sample_rate, gain, bandwidth
|
||||||
|
|
||||||
|
|
||||||
|
def validate_tx_gain(device_type: str, gain: float) -> None:
    """
    Check that a requested TX gain is legal for the given device.

    Rejects gains outside the device's supported range, and prints a
    warning to stderr when the gain is within 3 dB of the device maximum.
    Devices without a known range are not validated.

    Args:
        device_type: Device type key (e.g. "pluto", "hackrf").
        gain: Requested TX gain in dB.

    Raises:
        click.ClickException: If gain is out of range for the device.
    """
    # (min, max) TX gain in dB per device type.
    limits = {
        "pluto": (-89, 0),
        "hackrf": (0, 47),
        "bladerf": (-15, 60),
        "usrp": (-30, 20),  # Approximate, varies by model
    }

    bounds = limits.get(device_type)
    if bounds is None:
        return

    min_gain, max_gain = bounds
    if not (min_gain <= gain <= max_gain):
        raise click.ClickException(
            f"TX gain {gain} dB is out of range for {device_type}\n" f"Valid range: {min_gain} to {max_gain} dB"
        )

    # Warn when transmitting close to the device maximum.
    if gain >= max_gain - 3:
        click.echo(
            click.style("WARNING: ", fg="yellow", bold=True) + f"Transmitting at high gain level ({gain} dB)\n"
            f"Maximum for {device_type}: {max_gain} dB",
            err=True,
        )
|
||||||
|
|
||||||
|
|
||||||
|
def generate_recording(generate, input_file, sample_rate, verbose, legacy):
    """Obtain the recording to transmit: synthesize a test signal or load a file.

    Args:
        generate: One of "lfm", "chirp", "sine", "pulse", or None. When set
            (or when no input file is given), a signal is synthesized.
        input_file: Path to a recording file, or None.
        sample_rate: Sample rate in Hz used for signal generation.
        verbose: Emit generation progress messages.
        legacy: Use the legacy NPY loader when reading input_file.

    Returns:
        Recording ready for transmission.

    Raises:
        click.ClickException: If neither input nor generation applies.
    """
    # Generate signal or load from file
    if generate or input_file is None:
        # Generate signal instead of loading from file.
        # Imported lazily so the generators load only when actually needed.
        from utils.signal.basic_signal_generator import (
            chirp,
            lfm_chirp_complex,
            sine,
            square,
        )

        # Calculate number of samples for signal generation (default: 0.1 second = 100ms)
        # Shorter duration to avoid buffer issues with large sample rates
        num_samples = int(sample_rate * 0.1)  # 100ms of signal

        # NOTE(review): `recording` is only bound in the four branches below.
        # click.Choice restricts --generate to exactly these values, so the
        # final `return recording` should never hit an unbound name — confirm
        # no non-click caller passes another value.
        if generate == "lfm" or (generate is None and input_file is None):
            # Generate LFM chirp (default - visible on spectrogram)
            echo_verbose("Generating LFM chirp signal...", verbose)
            recording = lfm_chirp_complex(
                sample_rate=int(sample_rate),
                width=int(sample_rate * 0.4),  # 40% of sample rate (safe for filter)
                chirp_period=0.001,  # 1ms chirp period
                sigfc=0,  # Baseband
                total_time=num_samples / sample_rate,
                chirp_type="up",
            )
            echo_verbose(f"Generated {len(recording.data)} sample LFM chirp", verbose)

        elif generate == "chirp":
            # Generate simple chirp
            echo_verbose("Generating chirp signal...", verbose)
            recording = chirp(sample_rate=int(sample_rate), num_samples=num_samples, center_frequency=0)  # Baseband
            echo_verbose(f"Generated {len(recording.data)} sample chirp", verbose)

        elif generate == "sine":
            # Generate sine wave at 10% offset from center
            echo_verbose("Generating sine wave signal...", verbose)
            recording = sine(
                sample_rate=int(sample_rate),
                length=num_samples,
                frequency=sample_rate * 0.1,  # 10% offset
                amplitude=0.8,
            )
            echo_verbose(f"Generated {len(recording.data)} sample sine wave", verbose)

        elif generate == "pulse":
            # Generate pulse using square wave
            echo_verbose("Generating pulse signal...", verbose)
            recording = square(
                sample_rate=int(sample_rate),
                length=num_samples,
                frequency=1000,  # 1 kHz pulse
                amplitude=0.8,
                duty_cycle=0.1,  # 10% duty cycle for pulse
            )
            echo_verbose(f"Generated {len(recording.data)} sample pulse", verbose)

        return recording

    elif input_file:
        # Load input file
        return load_input_file(input_file, legacy=legacy)

    else:
        raise click.ClickException("Either --input or --generate must be specified")
|
||||||
|
|
||||||
|
|
||||||
|
def check_sample_rate_mismatch(recording: "Recording", specified_rate: float, quiet: bool) -> None:
    """
    Warn on stderr when the recording's stored sample rate disagrees with
    the user-specified rate by more than 1 Hz.

    The specified rate always wins; this only informs the user that the
    transmitted signal may be distorted.

    Args:
        recording: Recording whose metadata may carry a "sample_rate" entry.
        specified_rate: Sample rate requested by the user, in Hz.
        quiet: Suppress the warning output.
    """
    metadata = getattr(recording, "metadata", None)
    if not metadata:
        return

    recorded_rate = metadata.get("sample_rate")
    if not recorded_rate or abs(recorded_rate - specified_rate) <= 1:
        return

    if quiet:
        return

    click.echo(
        click.style("Warning: ", fg="yellow")
        + f"Recording sample rate ({format_sample_rate(recorded_rate)}) differs "
        f"from specified rate ({format_sample_rate(specified_rate)})\n"
        f"Using specified rate. Signal may be distorted.",
        err=True,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def repeated_transmission(sdr, recording, repeat, tx_delay, quiet, verbose):
    """
    Transmit *recording* `repeat` times on an initialized SDR device.

    Progress messages are emitted only when more than one transmission is
    requested; the optional delay is inserted between (never after) repeats.

    Args:
        sdr: Initialized SDR device exposing ``tx_recording``.
        recording: Recording to transmit.
        repeat: Number of transmissions to perform.
        tx_delay: Seconds to sleep between consecutive transmissions.
        quiet: Suppress progress output.
        verbose: Emit extra delay messages.
    """
    multiple = repeat > 1
    for index in range(repeat):
        if multiple:
            echo_progress(f"\nTransmission {index + 1}/{repeat}...", quiet)

        sdr.tx_recording(recording)

        if multiple:
            echo_progress(f"Transmission {index + 1}/{repeat} complete.", quiet)

        # Sleep only between transmissions, never after the last one.
        if tx_delay > 0 and index < repeat - 1:
            echo_verbose(f"Waiting {tx_delay}s before next transmission...", verbose)
            time.sleep(tx_delay)

    if multiple:
        echo_progress(f"\nAll {repeat} transmissions complete.", quiet)
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
@click.option("--device", "-d", type=click.Choice(TX_CAPABLE_DEVICES), help="Device type (TX-capable only)")
@click.option("--ident", "-i", help="Device identifier (IP address or name=value, e.g., 192.168.2.1 or name=myb210)")
@click.option(
    "--config", "-c", "config_file", type=click.Path(exists=True), help="Load parameters from YAML config file"
)
@click.option(
    "--sample-rate", "-s", type=float, default=None, help="Sample rate in Hz (e.g., 2e6) [default: device-specific]"
)
@click.option(
    "--center-frequency",
    "-f",
    type=str,
    default="2440M",
    show_default=True,
    help="Center frequency (e.g., 915e6, 2.4G)",
)
@click.option("--gain", "-g", type=float, help="TX gain in dB [default: device-specific safe level]")
@click.option("--bandwidth", "-b", type=float, help="Bandwidth in Hz (if supported) [default: device-specific]")
@click.option(
    "--input",
    "-in",
    "input_file",
    type=click.Path(),
    help=(
        "Input recording file (auto-detects format). "
        "If omitted and --generate not specified, generates default LFM chirp."
    ),
)
@click.option("--legacy", is_flag=True, help="Use legacy NPY format loader")
@click.option(
    "--generate",
    type=click.Choice(["lfm", "chirp", "sine", "pulse"]),
    help="Generate signal instead of loading from file (overrides --input)",
)
@click.option("--repeat", "-r", type=int, default=1, help="Repeat transmission N times (default: 1)")
@click.option("--continuous", is_flag=True, help="Transmit continuously until Ctrl+C")
@click.option("--tx-delay", type=float, default=0, help="Delay between transmissions in seconds")
@click.option("--yes", "-y", is_flag=True, help="Skip safety confirmations")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress progress output")
def transmit(
    device,
    ident,
    config_file,
    sample_rate,
    center_frequency,
    gain,
    bandwidth,
    input_file,
    legacy,
    generate,
    repeat,
    continuous,
    tx_delay,
    yes,
    verbose,
    quiet,
):
    """Transmit IQ samples from file using SDR device.

    Parameters may come from a YAML --config file; values given on the
    command line take precedence over the config.

    \b
    Examples:
        utils transmit -d hackrf --generate lfm --continuous
        utils transmit -d pluto -f 2.44G -g -10 -in recordings/rec_HackRF_2MHz_2025-12-01_15-36-21_80fc33f.sigmf-data

    """

    # Load config file if specified
    config = {}
    if config_file:
        config = load_yaml_config(config_file)
        echo_verbose(f"Loaded config from: {config_file}", verbose)

    # Command-line args override config file.
    # NOTE(review): `or` treats falsy CLI values as unset — e.g. --gain 0 or
    # --tx-delay 0 would be replaced by a config value; confirm intended.
    device = device or config.get("device")
    ident = ident or config.get("ident") or config.get("serial")  # Support legacy 'serial' in config
    sample_rate = sample_rate or config.get("sample_rate")
    center_frequency = center_frequency or config.get("center_frequency")
    gain = gain or config.get("gain")
    bandwidth = bandwidth or config.get("bandwidth")
    input_file = input_file or config.get("input")
    generate = generate or config.get("generate")
    repeat = repeat if repeat != 1 else config.get("repeat", 1)
    continuous = continuous or config.get("continuous", False)
    tx_delay = tx_delay or config.get("tx_delay", 0)

    # Resolve device and fill in device-specific defaults
    device, sample_rate, gain, bandwidth = select_params(device, sample_rate, gain, bandwidth, quiet, verbose)

    # Parse frequency
    center_freq_hz = parse_frequency(center_frequency)

    # Validate TX gain
    validate_tx_gain(device, gain)

    # Generate signal or load from file
    recording = generate_recording(generate, input_file, sample_rate, verbose, legacy)
    # Check sample rate mismatch
    check_sample_rate_mismatch(recording, sample_rate, quiet)

    # Safety warnings for continuous mode
    if continuous and not yes:
        click.echo(
            click.style("WARNING: ", fg="red", bold=True) + "Continuous transmission mode enabled\n"
            "This will transmit indefinitely until stopped.\n"
            "Ensure proper cooling and monitoring.",
            err=True,
        )
        if not click.confirm("Continue?", default=False):
            click.echo("Transmission cancelled.")
            return

    # Show transmission parameters
    # Multi-channel recordings report the per-channel sample count.
    num_samples = len(recording.data[0]) if len(recording.data.shape) > 1 else len(recording.data)
    echo_progress(f"Transmitting from {device.upper()}...", quiet)
    echo_progress(f"Sample rate: {format_sample_rate(sample_rate)}", quiet)
    echo_progress(f"Center frequency: {format_frequency(center_freq_hz)}", quiet)
    echo_progress(f"TX gain: {gain} dB", quiet)
    if bandwidth:
        echo_progress(f"Bandwidth: {format_sample_rate(bandwidth)}", quiet)

    # Show signal source
    if input_file:
        echo_progress(f"Input: {os.path.basename(input_file)} ({num_samples} samples)", quiet)
    else:
        signal_type = generate if generate else "lfm"
        echo_progress(f"Signal: Generated {signal_type.upper()} ({num_samples} samples)", quiet)

    if continuous:
        echo_progress("Mode: Continuous (Ctrl+C to stop)", quiet)
    elif repeat > 1:
        echo_progress(f"Repeat: {repeat} times with {tx_delay}s delay", quiet)

    # Initialize device
    echo_verbose("Initializing TX device...", verbose)
    sdr = get_sdr_device(device, ident, True)

    # Set up Ctrl+C handler for continuous mode
    stop_transmission = False

    def signal_handler(sig, frame):
        # Flip the flag so the while-loop below exits after the current
        # transmission finishes (graceful stop, no mid-buffer abort).
        nonlocal stop_transmission
        stop_transmission = True
        click.echo("\n\nStopping transmission...")

    if continuous:
        signal.signal(signal.SIGINT, signal_handler)

    try:
        # Initialize TX with parameters
        sdr.init_tx(
            sample_rate=sample_rate, center_frequency=center_freq_hz, gain=gain, channel=0  # Default to channel 0
        )

        # Set bandwidth if supported (after init_tx)
        if bandwidth is not None and hasattr(sdr, "set_tx_bandwidth"):
            sdr.set_tx_bandwidth(bandwidth)

        # Transmission loop
        if continuous:
            echo_progress("\nTransmitting continuously... [Press Ctrl+C to stop]", quiet)

            transmission_count = 0
            while not stop_transmission:
                sdr.tx_recording(recording)
                transmission_count += 1

                if verbose and transmission_count % 10 == 0:
                    echo_verbose(f"Transmitted {transmission_count} times", verbose)

            echo_progress(f"\nTransmitted {transmission_count} times total", quiet)

        else:
            # Repeat mode or single transmission
            repeated_transmission(sdr, recording, repeat, tx_delay, quiet, verbose)

    finally:
        # Clean up device
        echo_verbose("Closing TX device...", verbose)
        if hasattr(sdr, "close"):
            sdr.close()

    echo_progress("Transmission complete!", quiet)
|
||||||
418
ria_toolkit_oss_cli/ria_toolkit_oss/view.py
Normal file
418
ria_toolkit_oss_cli/ria_toolkit_oss/view.py
Normal file
|
|
@ -0,0 +1,418 @@
|
||||||
|
"""View command - Create visualizations from recordings."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from utils.io.recording import from_npy, load_recording
|
||||||
|
from utils.view.view_signal import view_annotations, view_channels, view_sig
|
||||||
|
from utils.view.view_signal_simple import view_simple_sig
|
||||||
|
|
||||||
|
from .common import echo_progress, echo_verbose, load_yaml_config
|
||||||
|
|
||||||
|
# Map visualization types to their functions and parameters
|
||||||
|
VISUALIZATION_TYPES = {
    # Registry consumed by the `view` command: "function" renders the plot,
    # "description" documents the mode, "options" lists which CLI flags the
    # mode consumes.
    "simple": {
        "function": view_simple_sig,
        "description": "Simple time-domain and spectrogram view",
        "options": ["fast_mode", "compact_mode", "horizontal_mode", "constellation_mode", "labels_mode", "slice"],
    },
    "full": {
        "function": view_sig,
        "description": "Full-featured plot with spectrogram, IQ, FFT, constellation, and metadata",
        "options": [
            "plot_length",
            "plot_spectrogram",
            "iq",
            "frequency",
            "constellation",
            "metadata",
            "logo",
            "dark",
            "spines",
        ],
    },
    "annotations": {
        "function": view_annotations,
        "description": "Annotation-focused spectrogram view",
        "options": ["channel", "dark"],
    },
    "channels": {"function": view_channels, "description": "Multi-channel IQ and spectrogram view", "options": []},
}
|
||||||
|
|
||||||
|
|
||||||
|
def parse_slice(slice_str: str) -> tuple:
    """Parse a slice specification of the form 'start:end' or 'start:end:step'.

    Empty fields become None, mirroring Python slice syntax (e.g. "::2"
    yields (None, None, 2)).

    Args:
        slice_str: Slice string (e.g., "1000:5000" or "::2")

    Returns:
        tuple: (start, end) or (start, end, step)

    Raises:
        click.BadParameter: If slice format is invalid
    """
    try:
        fields = [int(field) if field else None for field in slice_str.split(":")]
        if len(fields) not in (2, 3):
            raise ValueError("Slice must have 2 or 3 parts")
        return tuple(fields)
    except (ValueError, IndexError):
        raise click.BadParameter(
            f"Invalid slice format: '{slice_str}'. "
            f"Expected formats: 'start:end' or 'start:end:step' (e.g., '1000:5000' or '::2')"
        )
|
||||||
|
|
||||||
|
|
||||||
|
def parse_figsize(figsize_str: str) -> tuple:
    """Parse a figure size specification of the form 'WxH'.

    The separator is case-insensitive ('x' or 'X'); dimensions may be
    fractional but must be strictly positive.

    Args:
        figsize_str: Figure size string (e.g., "10x6")

    Returns:
        tuple: (width, height) in inches

    Raises:
        click.BadParameter: If format is invalid
    """
    try:
        dims = figsize_str.lower().split("x")
        if len(dims) != 2:
            raise ValueError("Must have width and height")
        size = (float(dims[0]), float(dims[1]))
        if min(size) <= 0:
            raise ValueError("Dimensions must be positive")
        return size
    except (ValueError, IndexError):
        raise click.BadParameter(
            f"Invalid figure size: '{figsize_str}'. " f"Expected format: 'WxH' (e.g., '10x6', '12.5x8')"
        )
|
||||||
|
|
||||||
|
|
||||||
|
def generate_output_path(input_path: str, output_path: "Optional[str]", format: str) -> str:
    """Derive the visualization output path when the user did not supply one.

    The auto-generated file lives next to the input, named after the input's
    stem (minus any SigMF '-data'/'-meta' role suffix) with the requested
    format as extension.

    Args:
        input_path: Input file path
        output_path: User-specified output path (or None)
        format: Output format (png, pdf, svg, jpg)

    Returns:
        str: Full output path
    """
    # An explicit user choice always wins.
    if output_path:
        return output_path

    source = Path(input_path)

    # SigMF recordings come as <name>-data / <name>-meta pairs; strip the
    # role suffix so both halves map to the same image name.
    base = source.stem
    if base.endswith(("-data", "-meta")):
        base = base.rsplit("-", 1)[0]

    return str(source.parent / f"{base}.{format}")
|
||||||
|
|
||||||
|
|
||||||
|
def load_recording_with_legacy(input_path: str, legacy: bool, verbose: bool):
    """Load a recording, optionally via the legacy NPY code path.

    Args:
        input_path: Path to input file
        legacy: Whether to use legacy NPY loader
        verbose: Verbose output

    Returns:
        Recording object

    Raises:
        click.ClickException: If loading fails
    """
    try:
        if legacy:
            echo_verbose(f"Loading as legacy NPY format: {input_path}", verbose)
            return from_npy(input_path, legacy=True)
        echo_verbose(f"Loading recording: {input_path}", verbose)
        return load_recording(input_path)
    except FileNotFoundError:
        raise click.ClickException(f"Input file not found: {input_path}")
    except Exception as e:
        # Any other loader failure is surfaced as a single CLI error.
        raise click.ClickException(f"Error loading recording: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
def get_view_output_path(should_save, overwrite, input, output, output_format):
    """Resolve the destination path for a saved visualization.

    Returns None when saving is disabled; otherwise auto-generates a path
    (via generate_output_path) and refuses to clobber an existing file
    unless overwrite is set.

    Raises:
        click.ClickException: If the output exists and overwrite is False.
    """
    if not should_save:
        return None

    output_path = generate_output_path(input, output, output_format)

    # Refuse to silently overwrite an existing image.
    if os.path.exists(output_path) and not overwrite:
        raise click.ClickException(f"Output file '{output_path}' already exists. " f"Use --overwrite to replace.")

    return output_path
|
||||||
|
|
||||||
|
|
||||||
|
def print_metadata(recording, quiet):
    """Pretty-print a recording's metadata table to the console.

    Numeric values with magnitude >= 1000 get thousands separators; floats
    >= 1e6 are rounded to whole numbers, smaller floats keep two decimals.
    Does nothing when quiet is set.

    Args:
        recording: Recording whose metadata is displayed.
        quiet: Suppress all output.
    """
    if quiet:
        return

    click.echo("\nRecording Metadata:")
    click.echo("-" * 40)

    # NOTE(review): reads the private _metadata attribute directly — confirm
    # Recording exposes no public accessor for this.
    metadata = recording._metadata
    if metadata:
        for key, value in sorted(metadata.items()):
            # Format large numbers nicely
            if isinstance(value, (int, float)) and abs(value) >= 1000:
                if isinstance(value, float) and value >= 1e6:
                    click.echo(f" {key}: {value:,.0f}")
                elif isinstance(value, float):
                    click.echo(f" {key}: {value:,.2f}")
                else:
                    click.echo(f" {key}: {value:,}")
            else:
                click.echo(f" {key}: {value}")
    else:
        click.echo(" (no metadata)")

    click.echo("-" * 40)
    click.echo()
|
||||||
|
|
||||||
|
|
||||||
|
@click.command()
|
||||||
|
@click.argument("input", type=click.Path(exists=True))
|
||||||
|
@click.option(
|
||||||
|
"--type",
|
||||||
|
"viz_type",
|
||||||
|
type=click.Choice(list(VISUALIZATION_TYPES.keys())),
|
||||||
|
default="simple",
|
||||||
|
show_default=True,
|
||||||
|
help="Visualization type",
|
||||||
|
)
|
||||||
|
@click.option("--output", type=click.Path(), help="Output file path (default: auto-generated)")
|
||||||
|
@click.option(
|
||||||
|
"--format",
|
||||||
|
"output_format",
|
||||||
|
type=click.Choice(["png", "pdf", "svg", "jpg"]),
|
||||||
|
default="png",
|
||||||
|
show_default=True,
|
||||||
|
help="Output format",
|
||||||
|
)
|
||||||
|
@click.option("--show", is_flag=True, help="Display interactive plot")
|
||||||
|
@click.option("--no-save", is_flag=True, help="Don't save file (only with --show)")
|
||||||
|
@click.option("--dpi", type=int, default=300, show_default=True, help="Output DPI (PNG only)")
|
||||||
|
@click.option("--figsize", type=str, help="Figure size in inches (e.g., '10x6')")
|
||||||
|
@click.option("--title", type=str, help="Custom plot title")
|
||||||
|
@click.option("--legacy", is_flag=True, help="Load input as legacy NPY format")
|
||||||
|
@click.option("--config", type=click.Path(exists=True), help="YAML config file")
|
||||||
|
# Type-specific options for 'simple' mode
|
||||||
|
@click.option("--fast", is_flag=True, help="[simple] Fast mode - reduced quality for speed")
|
||||||
|
@click.option("--compact", is_flag=True, help="[simple] Compact mode - minimal labels")
|
||||||
|
@click.option("--horizontal", is_flag=True, help="[simple] Horizontal layout")
|
||||||
|
@click.option("--constellation", is_flag=True, help="[simple] Show constellation plot")
|
||||||
|
@click.option("--labels", is_flag=True, help="[simple] Show detailed labels")
|
||||||
|
@click.option("--slice", type=str, help="[simple] Slice of signal (e.g., '1000:5000')")
|
||||||
|
# Type-specific options for 'full' mode
|
||||||
|
@click.option("--plot-length", type=int, help="[full] Number of samples to plot")
|
||||||
|
@click.option("--no-spectrogram", is_flag=True, help="[full] Disable spectrogram")
|
||||||
|
@click.option("--no-iq", is_flag=True, help="[full] Disable IQ plot")
|
||||||
|
@click.option("--no-frequency", is_flag=True, help="[full] Disable frequency plot")
|
||||||
|
@click.option("--no-constellation", is_flag=True, help="[full] Disable constellation")
|
||||||
|
@click.option("--no-metadata", is_flag=True, help="[full] Disable metadata display")
|
||||||
|
@click.option("--no-logo", is_flag=True, help="[full] Disable logo")
|
||||||
|
@click.option("--light", is_flag=True, help="[full/annotations] Use light theme")
|
||||||
|
@click.option("--spines", is_flag=True, help="[full] Show plot spines (borders)")
|
||||||
|
# Type-specific options for 'annotations' mode
|
||||||
|
@click.option("--channel", type=int, default=0, show_default=True, help="[annotations/channels] Channel to visualize")
|
||||||
|
# Common options
|
||||||
|
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
|
||||||
|
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
|
||||||
|
@click.option("--overwrite", is_flag=True, help="Overwrite existing output file")
|
||||||
|
def view(
    input,
    viz_type,
    output,
    output_format,
    show,
    no_save,
    dpi,
    figsize,
    title,
    legacy,
    config,
    fast,
    compact,
    horizontal,
    constellation,
    labels,
    slice,
    plot_length,
    no_spectrogram,
    no_iq,
    no_frequency,
    no_constellation,
    no_metadata,
    no_logo,
    light,
    spines,
    channel,
    verbose,
    quiet,
    overwrite,
):
    """Create visualizations from recordings.

    INPUT is the recording file (SigMF, NPY, WAV, or MIDAS Blue format).

    \b
    Examples:
    # Basic visualization (saves to recording.png)
    utils view recording.sigmf
    \b
    # Spectrogram with custom output
    utils view capture.npy --output spec.png
    \b
    # Interactive display
    utils view signal.npy --show --no-save
    \b
    # High-resolution PDF
    utils view recording.blue --format pdf --dpi 600
    \b
    # Simple mode with constellation
    utils view qam.wav --type simple --constellation --labels
    \b
    # Full-featured plot
    utils view capture.sigmf --type full --title "Lab Test"
    \b
    # Legacy NPY file
    utils view old_capture.npy --legacy --type simple
    """
    # NOTE(review): `figsize` is accepted but never used in this body — confirm
    # whether it should be forwarded to the visualization functions or removed.
    # NOTE(review): the parameter name `slice` shadows the builtin; it mirrors
    # the --slice option name, so renaming would need a click alias.

    # Load config file if specified. Values are parsed but not yet applied.
    if config:
        _ = load_yaml_config(config)
        # Config file overrides can be implemented here
        echo_verbose(f"Loaded config from: {config}", verbose)

    # Determine if we should save (saving is the default; --no-save disables it).
    should_save = not no_save

    # Generate output path if needed (derived from the input name when --output
    # is not given; honors --overwrite).
    output_path = get_view_output_path(should_save, overwrite, input, output, output_format)

    # Load recording (with optional legacy-NPY handling).
    echo_progress(f"Loading recording: {input}", quiet)
    recording = load_recording_with_legacy(input, legacy, verbose)

    # recording.data may be 1-D (samples) or 2-D (channels x samples);
    # count samples along the last axis either way.
    num_samples = len(recording.data[0]) if len(recording.data.shape) > 1 else len(recording.data)
    echo_verbose(f"Loaded {num_samples:,} samples", verbose)

    # Print metadata to console
    print_metadata(recording, quiet)

    # Get visualization info (registry entry holding the plotting function).
    viz_info = VISUALIZATION_TYPES[viz_type]

    # Build type-specific keyword arguments for the visualization function.
    # Note: view_simple_sig has 'saveplot' param, others don't
    if viz_type == "simple":
        params = {
            "recording": recording,
            "output_path": output_path or "temp.png",
            "saveplot": should_save,
            "fast_mode": fast,
            "compact_mode": compact,
            "horizontal_mode": horizontal,
            "constellation_mode": constellation,
            "labels_mode": labels,
        }

        # Optional sample range, e.g. "1000:5000".
        if slice:
            parsed_slice = parse_slice(slice)
            params["slice"] = parsed_slice
            echo_verbose(f"Using slice: {parsed_slice}", verbose)

    elif viz_type == "full":
        # 'full' mode exposes each sub-plot as an opt-out flag (--no-*).
        params = {
            "recording": recording,
            "output_path": output_path or "temp.png",
            "dpi": dpi,
            "plot_spectrogram": not no_spectrogram,
            "iq": not no_iq,
            "frequency": not no_frequency,
            "constellation": not no_constellation,
            "metadata": not no_metadata,
            "logo": not no_logo,
            "dark": not light,
            "spines": spines,
        }
        if plot_length:
            params["plot_length"] = plot_length
            echo_verbose(f"Plot length: {plot_length:,} samples", verbose)

    elif viz_type == "annotations":
        params = {
            "recording": recording,
            "output_path": output_path or "temp.png",
            "channel": channel,
            "dpi": dpi,
            "dark": not light,
        }

    elif viz_type == "channels":
        params = {
            "recording": recording,
            "output_path": output_path or "temp.png",
        }

    else:
        raise click.ClickException(f"Unknown visualization type: {viz_type}")

    # Only 'simple' mode supports skipping the save step entirely.
    if not should_save and not show and viz_type != "simple":
        raise click.ClickException(f"--no-save is not supported with --type {viz_type} (always saves)")
    if title:
        params["title"] = title

    # Generate visualization by dispatching to the registered function.
    viz_func = viz_info["function"]
    echo_progress(f"Generating {viz_type} visualization...", quiet)
    echo_verbose(f"Using function: {viz_func.__name__}", verbose)

    try:
        _ = viz_func(**params)

        if should_save:
            echo_progress(f"Saved: {output_path}", quiet)

            # Show file size
            if verbose and os.path.exists(output_path):
                size_kb = os.path.getsize(output_path) / 1024
                echo_verbose(f"File size: {size_kb:.1f} KB", verbose)

        # Show plot if requested. Imported lazily so headless runs never
        # touch a matplotlib backend.
        if show:
            import matplotlib.pyplot as plt

            echo_verbose("Displaying plot...", verbose)
            plt.show()

    except Exception as e:
        # Wrap any plotting failure in a ClickException for a clean CLI error.
        raise click.ClickException(f"Error generating visualization: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
# For CLI registration: only `view` is exported when this module is scanned
# for commands (e.g. via `from ... import *`).
__all__ = ["view"]
|
||||||
12
src/ria_toolkit_oss/view/__init__.py
Normal file
12
src/ria_toolkit_oss/view/__init__.py
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
"""
|
||||||
|
The package contains assorted plotting and report generation utilities to help visualize RIA components such as
|
||||||
|
recordings and radio datasets.
|
||||||
|
"""
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"view_annotations",
|
||||||
|
"view_channels",
|
||||||
|
"view_sig",
|
||||||
|
]
|
||||||
|
|
||||||
|
from .view_signal import view_annotations, view_channels, view_sig
|
||||||
63
src/ria_toolkit_oss/view/dataset.py
Normal file
63
src/ria_toolkit_oss/view/dataset.py
Normal file
|
|
@ -0,0 +1,63 @@
|
||||||
|
import os
|
||||||
|
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import numpy as np
|
||||||
|
from matplotlib.backends.backend_pdf import PdfPages
|
||||||
|
|
||||||
|
from utils.io.recording import from_npy
|
||||||
|
|
||||||
|
|
||||||
|
def create_dataset_pdf(dataset_path, output_path, div=64, metadata_keys=None):
    """Render every ``.npy`` recording under a directory tree into a single PDF.

    Walks ``dataset_path`` recursively and writes one page per recording: a
    spectrogram on top and the recording's metadata as text below.

    :param dataset_path: Root directory searched recursively for ``.npy`` files.
    :param output_path: Path of the PDF file to create.
    :param div: Divisor controlling the spectrogram FFT size (larger ``div``
        means a smaller NFFT for a given signal length).
    :param metadata_keys: Optional subset of metadata keys to print; when None,
        all metadata is printed.
    """
    # Collect all recordings up front so the progress counter has a correct
    # denominator. (Previously the cumulative counter was printed against the
    # current directory's full file listing, including non-.npy files.)
    npy_paths = [
        os.path.join(root, name)
        for root, _, files in os.walk(dataset_path)
        for name in files
        if name.endswith(".npy")
    ]

    with PdfPages(output_path) as pdf:
        for i, full_path in enumerate(npy_paths, start=1):
            print(f"{i}/{len(npy_paths)}")

            recording = from_npy(full_path)
            samples = recording.data[0]
            metadata = recording.metadata

            if metadata_keys is not None:
                metadata_to_print = {key: metadata.get(key, "None") for key in metadata_keys}
            else:
                metadata_to_print = metadata

            # FFT size: largest power of two not exceeding signal_length/div,
            # floored at 64.
            signal_length = len(samples)
            nfft = max(2 ** int(np.log2(signal_length // div)), 64)

            dict_text = "\n".join(f"{key}: {value}" for key, value in metadata_to_print.items())

            fig, axs = plt.subplots(2, 1, figsize=(10, 10), gridspec_kw={"height_ratios": [4, 1]})

            # Spectrogram in the first subplot. noverlap must be strictly less
            # than NFFT; the previous fixed value of 128 raised an error
            # whenever nfft was 64 or 128.
            axs[0].specgram(
                samples,
                NFFT=nfft,
                Fs=metadata["sample_rate"],
                cmap="twilight",
                noverlap=min(128, nfft // 2),
            )
            axs[0].set_title(os.path.basename(full_path))
            axs[0].set_xlabel("Time (s)")
            axs[0].set_ylabel("Frequency (Hz)")

            # Adjust layout so that there's enough space for the text subplot.
            plt.subplots_adjust(hspace=0.5)

            # Metadata text in the second subplot.
            axs[1].text(0.1, 0.5, dict_text, ha="left", va="center", fontsize=10, color="black", wrap=True)
            axs[1].axis("off")  # Turn off axes for the text subplot.

            # Save the page (spectrogram and text) to the PDF, then release
            # the figure to keep memory bounded over large datasets.
            pdf.savefig(fig)
            plt.close(fig)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
|
||||||
|
create_dataset_pdf("/mnt/hddstorage/alec/qesa1_c4/nov15/low_mod2", "dataset.pdf")
|
||||||
192
src/ria_toolkit_oss/view/recording.py
Normal file
192
src/ria_toolkit_oss/view/recording.py
Normal file
|
|
@ -0,0 +1,192 @@
|
||||||
|
import numpy as np
|
||||||
|
import plotly.graph_objects as go
|
||||||
|
import scipy.signal as signal
|
||||||
|
from plotly.graph_objs import Figure
|
||||||
|
from scipy.fft import fft, fftshift
|
||||||
|
|
||||||
|
from utils.data import Recording
|
||||||
|
|
||||||
|
|
||||||
|
def spectrogram(rec: Recording, thumbnail: bool = False) -> Figure:
    """Create a spectrogram for the recording.

    :param rec: Signal to plot.
    :type rec: utils.data.Recording
    :param thumbnail: Whether to return a small thumbnail version or full plot.
    :type thumbnail: bool

    :return: Spectrogram, as a Plotly figure.
    """
    complex_signal = rec.data[0]
    sample_rate = int(rec.metadata.get("sample_rate", 1))
    plot_length = len(complex_signal)

    # Choose an FFT size appropriate for the signal length.
    if plot_length < 2000:
        fft_size = 64
    elif plot_length < 10000:
        fft_size = 256
    elif plot_length < 1000000:
        fft_size = 1024
    else:
        fft_size = 2048

    frequencies, times, Sxx = signal.spectrogram(
        complex_signal,
        fs=sample_rate,
        nfft=fft_size,
        nperseg=fft_size,
        noverlap=fft_size // 8,
        scaling="density",
        mode="complex",
        return_onesided=False,
    )

    # Convert complex values to amplitude and then to log scale for visualization
    Sxx_magnitude = np.abs(Sxx)
    Sxx_log = np.log10(Sxx_magnitude + 1e-6)

    # Normalize spectrogram values between 0 and 1 for plotting. Guard against
    # a constant-valued spectrogram, which would otherwise divide by zero.
    Sxx_log_shifted = Sxx_log - np.min(Sxx_log)
    peak = np.max(Sxx_log_shifted)
    Sxx_log_norm = Sxx_log_shifted / peak if peak > 0 else Sxx_log_shifted

    # Shift frequency bins and spectrogram rows so frequencies run from negative to positive
    frequencies_shifted = np.fft.fftshift(frequencies)
    Sxx_shifted = np.fft.fftshift(Sxx_log_norm, axes=0)

    fig = go.Figure(
        data=go.Heatmap(
            z=Sxx_shifted,
            # scipy.signal.spectrogram returns segment times in seconds when
            # fs is in Hz; the previous "/ 1e6" rescaled them while the axis
            # was still labelled "Time [s]".
            x=times,
            y=frequencies_shifted,
            colorscale="Viridis",
            zmin=0,
            zmax=1,
            reversescale=False,
            showscale=False,
        )
    )

    if thumbnail:
        # Strip axes and shrink the layout for list/preview views.
        fig.update_xaxes(showticklabels=False)
        fig.update_yaxes(showticklabels=False)
        fig.update_layout(
            template="plotly_dark",
            width=200,
            height=100,
            margin=dict(l=5, r=5, t=5, b=5),
            xaxis=dict(scaleanchor=None),
            yaxis=dict(scaleanchor=None),
        )
    else:
        fig.update_layout(
            title="Spectrogram",
            xaxis_title="Time [s]",
            yaxis_title="Frequency [Hz]",
            template="plotly_dark",
            height=300,
            width=800,
        )

    return fig
|
||||||
|
|
||||||
|
|
||||||
|
def iq_time_series(rec: Recording) -> Figure:
    """Plot the in-phase and quadrature components of the signal over time.

    :param rec: Signal to plot.
    :type rec: utils.data.Recording

    :return: Time series plot as a Plotly figure.
    """
    iq = rec.data[0]
    fs = int(rec.metadata.get("sample_rate", 1))
    # One timestamp per sample, in seconds.
    timestamps = np.arange(len(iq)) / fs

    fig = go.Figure()
    # Real part is the I trace; imaginary part is the Q trace.
    for component, label in ((iq.real, "I (In-phase)"), (iq.imag, "Q (Quadrature)")):
        fig.add_trace(go.Scatter(x=timestamps, y=component, mode="lines", name=label, line=dict(width=0.6)))

    fig.update_layout(
        title="IQ Time Series",
        xaxis_title="Time [s]",
        yaxis_title="Amplitude",
        template="plotly_dark",
        height=300,
        width=800,
        showlegend=True,
    )

    return fig
|
||||||
|
|
||||||
|
|
||||||
|
def frequency_spectrum(rec: Recording) -> Figure:
    """Create a frequency spectrum plot from the recording.

    :param rec: Input signal to plot.
    :type rec: utils.data.Recording

    :return: Frequency spectrum as a Plotly figure.
    """
    complex_signal = rec.data[0]
    center_frequency = int(rec.metadata.get("center_frequency", 0))
    sample_rate = int(rec.metadata.get("sample_rate", 1))

    # Small offset so the log of an exactly-zero bin is finite.
    epsilon = 1e-10
    spectrum = np.abs(fftshift(fft(complex_signal)))
    # Use the true DFT bin centers rather than an endpoint-inclusive linspace,
    # which mis-spaced the bins by a factor of N/(N-1).
    freqs = fftshift(np.fft.fftfreq(len(complex_signal), d=1.0 / sample_rate)) + center_frequency
    log_spectrum = np.log10(spectrum + epsilon)

    # Normalize to [0, 1]; guard against a flat spectrum (max == min), which
    # would otherwise divide by zero.
    span = log_spectrum.max() - log_spectrum.min()
    if span > 0:
        scaled_log_spectrum = (log_spectrum - log_spectrum.min()) / span
    else:
        scaled_log_spectrum = np.zeros_like(log_spectrum)

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=freqs, y=scaled_log_spectrum, mode="lines", name="Spectrum", line=dict(width=0.4)))

    fig.update_layout(
        title="Frequency Spectrum",
        xaxis_title="Frequency [Hz]",
        yaxis_title="Magnitude",
        yaxis_type="log",
        template="plotly_dark",
        height=300,
        width=800,
        showlegend=False,
    )

    return fig
|
||||||
|
|
||||||
|
|
||||||
|
def constellation(rec: Recording) -> Figure:
    """Create a constellation plot from the recording.

    :param rec: Input signal to plot.
    :type rec: utils.data.Recording

    :return: Constellation as a Plotly figure.
    """
    iq = rec.data[0]

    # Decimate to at most ~5000 points: plotting every sample hurts
    # performance and interactivity without adding visible detail to the
    # constellation.
    max_points = 5000
    stride = max(1, len(iq) // max_points)
    in_phase = iq.real[::stride]
    quadrature = iq.imag[::stride]

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=in_phase, y=quadrature, mode="lines", name="Constellation", line=dict(width=0.2)))

    fig.update_layout(
        title="Constellation",
        xaxis_title="In-phase (I)",
        yaxis_title="Quadrature (Q)",
        template="plotly_dark",
        height=400,
        width=400,
        showlegend=False,
        # NOTE(review): fixed axis range assumes signals normalized to roughly
        # unit amplitude — confirm against upstream recordings.
        xaxis=dict(range=[-1.1, 1.1]),
        yaxis=dict(range=[-1.1, 1.1]),
    )

    return fig
|
||||||
Loading…
Reference in New Issue
Block a user