cli #15

Merged
madrigal merged 28 commits from cli into main 2025-12-22 10:42:57 -05:00
116 changed files with 18310 additions and 126 deletions

11
.gitignore vendored
View File

@ -88,3 +88,14 @@ cython_debug/
# pyenv # pyenv
.python-version .python-version
# Generated files
*.dot
*.hdf5
*.npy
*.png
*.sigmf-data
*.sigmf-meta
*.blue
*.wav
images/

667
poetry.lock generated
View File

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. # This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]] [[package]]
name = "alabaster" name = "alabaster"
@ -359,7 +359,7 @@ version = "8.2.1"
description = "Composable command line interface toolkit" description = "Composable command line interface toolkit"
optional = false optional = false
python-versions = ">=3.10" python-versions = ">=3.10"
groups = ["dev", "docs"] groups = ["main", "dev", "docs"]
files = [ files = [
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
@ -374,12 +374,199 @@ version = "0.4.6"
description = "Cross-platform colored terminal text." description = "Cross-platform colored terminal text."
optional = false optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
groups = ["dev", "docs", "test"] groups = ["main", "dev", "docs", "test"]
files = [ files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
] ]
markers = {dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "contourpy"
version = "1.3.2"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "python_version == \"3.10\""
files = [
{file = "contourpy-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ba38e3f9f330af820c4b27ceb4b9c7feee5fe0493ea53a8720f4792667465934"},
{file = "contourpy-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc41ba0714aa2968d1f8674ec97504a8f7e334f48eeacebcaa6256213acb0989"},
{file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9be002b31c558d1ddf1b9b415b162c603405414bacd6932d031c5b5a8b757f0d"},
{file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8d2e74acbcba3bfdb6d9d8384cdc4f9260cae86ed9beee8bd5f54fee49a430b9"},
{file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e259bced5549ac64410162adc973c5e2fb77f04df4a439d00b478e57a0e65512"},
{file = "contourpy-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad687a04bc802cbe8b9c399c07162a3c35e227e2daccf1668eb1f278cb698631"},
{file = "contourpy-1.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cdd22595308f53ef2f891040ab2b93d79192513ffccbd7fe19be7aa773a5e09f"},
{file = "contourpy-1.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b4f54d6a2defe9f257327b0f243612dd051cc43825587520b1bf74a31e2f6ef2"},
{file = "contourpy-1.3.2-cp310-cp310-win32.whl", hash = "sha256:f939a054192ddc596e031e50bb13b657ce318cf13d264f095ce9db7dc6ae81c0"},
{file = "contourpy-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c440093bbc8fc21c637c03bafcbef95ccd963bc6e0514ad887932c18ca2a759a"},
{file = "contourpy-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a37a2fb93d4df3fc4c0e363ea4d16f83195fc09c891bc8ce072b9d084853445"},
{file = "contourpy-1.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b7cd50c38f500bbcc9b6a46643a40e0913673f869315d8e70de0438817cb7773"},
{file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6658ccc7251a4433eebd89ed2672c2ed96fba367fd25ca9512aa92a4b46c4f1"},
{file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:70771a461aaeb335df14deb6c97439973d253ae70660ca085eec25241137ef43"},
{file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a887a6e8c4cd0897507d814b14c54a8c2e2aa4ac9f7686292f9769fcf9a6ab"},
{file = "contourpy-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3859783aefa2b8355697f16642695a5b9792e7a46ab86da1118a4a23a51a33d7"},
{file = "contourpy-1.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:eab0f6db315fa4d70f1d8ab514e527f0366ec021ff853d7ed6a2d33605cf4b83"},
{file = "contourpy-1.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d91a3ccc7fea94ca0acab82ceb77f396d50a1f67412efe4c526f5d20264e6ecd"},
{file = "contourpy-1.3.2-cp311-cp311-win32.whl", hash = "sha256:1c48188778d4d2f3d48e4643fb15d8608b1d01e4b4d6b0548d9b336c28fc9b6f"},
{file = "contourpy-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:5ebac872ba09cb8f2131c46b8739a7ff71de28a24c869bcad554477eb089a878"},
{file = "contourpy-1.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2"},
{file = "contourpy-1.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15"},
{file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92"},
{file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87"},
{file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415"},
{file = "contourpy-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe"},
{file = "contourpy-1.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441"},
{file = "contourpy-1.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e"},
{file = "contourpy-1.3.2-cp312-cp312-win32.whl", hash = "sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912"},
{file = "contourpy-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73"},
{file = "contourpy-1.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:de39db2604ae755316cb5967728f4bea92685884b1e767b7c24e983ef5f771cb"},
{file = "contourpy-1.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f9e896f447c5c8618f1edb2bafa9a4030f22a575ec418ad70611450720b5b08"},
{file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71e2bd4a1c4188f5c2b8d274da78faab884b59df20df63c34f74aa1813c4427c"},
{file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de425af81b6cea33101ae95ece1f696af39446db9682a0b56daaa48cfc29f38f"},
{file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:977e98a0e0480d3fe292246417239d2d45435904afd6d7332d8455981c408b85"},
{file = "contourpy-1.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:434f0adf84911c924519d2b08fc10491dd282b20bdd3fa8f60fd816ea0b48841"},
{file = "contourpy-1.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c66c4906cdbc50e9cba65978823e6e00b45682eb09adbb78c9775b74eb222422"},
{file = "contourpy-1.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8b7fc0cd78ba2f4695fd0a6ad81a19e7e3ab825c31b577f384aa9d7817dc3bef"},
{file = "contourpy-1.3.2-cp313-cp313-win32.whl", hash = "sha256:15ce6ab60957ca74cff444fe66d9045c1fd3e92c8936894ebd1f3eef2fff075f"},
{file = "contourpy-1.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e1578f7eafce927b168752ed7e22646dad6cd9bca673c60bff55889fa236ebf9"},
{file = "contourpy-1.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0475b1f6604896bc7c53bb070e355e9321e1bc0d381735421a2d2068ec56531f"},
{file = "contourpy-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c85bb486e9be652314bb5b9e2e3b0d1b2e643d5eec4992c0fbe8ac71775da739"},
{file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:745b57db7758f3ffc05a10254edd3182a2a83402a89c00957a8e8a22f5582823"},
{file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:970e9173dbd7eba9b4e01aab19215a48ee5dd3f43cef736eebde064a171f89a5"},
{file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6c4639a9c22230276b7bffb6a850dfc8258a2521305e1faefe804d006b2e532"},
{file = "contourpy-1.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc829960f34ba36aad4302e78eabf3ef16a3a100863f0d4eeddf30e8a485a03b"},
{file = "contourpy-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d32530b534e986374fc19eaa77fcb87e8a99e5431499949b828312bdcd20ac52"},
{file = "contourpy-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e298e7e70cf4eb179cc1077be1c725b5fd131ebc81181bf0c03525c8abc297fd"},
{file = "contourpy-1.3.2-cp313-cp313t-win32.whl", hash = "sha256:d0e589ae0d55204991450bb5c23f571c64fe43adaa53f93fc902a84c96f52fe1"},
{file = "contourpy-1.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:78e9253c3de756b3f6a5174d024c4835acd59eb3f8e2ca13e775dbffe1558f69"},
{file = "contourpy-1.3.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fd93cc7f3139b6dd7aab2f26a90dde0aa9fc264dbf70f6740d498a70b860b82c"},
{file = "contourpy-1.3.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:107ba8a6a7eec58bb475329e6d3b95deba9440667c4d62b9b6063942b61d7f16"},
{file = "contourpy-1.3.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ded1706ed0c1049224531b81128efbd5084598f18d8a2d9efae833edbd2b40ad"},
{file = "contourpy-1.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5f5964cdad279256c084b69c3f412b7801e15356b16efa9d78aa974041903da0"},
{file = "contourpy-1.3.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49b65a95d642d4efa8f64ba12558fcb83407e58a2dfba9d796d77b63ccfcaff5"},
{file = "contourpy-1.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8c5acb8dddb0752bf252e01a3035b21443158910ac16a3b0d20e7fed7d534ce5"},
{file = "contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54"},
]
[package.dependencies]
numpy = ">=1.23"
[package.extras]
bokeh = ["bokeh", "selenium"]
docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.15.0)", "types-Pillow"]
test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"]
[[package]]
name = "contourpy"
version = "1.3.3"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
python-versions = ">=3.11"
groups = ["main"]
markers = "python_version >= \"3.11\""
files = [
{file = "contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1"},
{file = "contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381"},
{file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7"},
{file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1"},
{file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a"},
{file = "contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db"},
{file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620"},
{file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f"},
{file = "contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff"},
{file = "contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42"},
{file = "contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470"},
{file = "contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb"},
{file = "contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6"},
{file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7"},
{file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8"},
{file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea"},
{file = "contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1"},
{file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7"},
{file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411"},
{file = "contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69"},
{file = "contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b"},
{file = "contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc"},
{file = "contourpy-1.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5"},
{file = "contourpy-1.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1"},
{file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286"},
{file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5"},
{file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67"},
{file = "contourpy-1.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9"},
{file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659"},
{file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7"},
{file = "contourpy-1.3.3-cp313-cp313-win32.whl", hash = "sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d"},
{file = "contourpy-1.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263"},
{file = "contourpy-1.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9"},
{file = "contourpy-1.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d"},
{file = "contourpy-1.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216"},
{file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae"},
{file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20"},
{file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99"},
{file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b"},
{file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a"},
{file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e"},
{file = "contourpy-1.3.3-cp313-cp313t-win32.whl", hash = "sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3"},
{file = "contourpy-1.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8"},
{file = "contourpy-1.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301"},
{file = "contourpy-1.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a"},
{file = "contourpy-1.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77"},
{file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5"},
{file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4"},
{file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36"},
{file = "contourpy-1.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3"},
{file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b"},
{file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36"},
{file = "contourpy-1.3.3-cp314-cp314-win32.whl", hash = "sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d"},
{file = "contourpy-1.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd"},
{file = "contourpy-1.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339"},
{file = "contourpy-1.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772"},
{file = "contourpy-1.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77"},
{file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13"},
{file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe"},
{file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f"},
{file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0"},
{file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4"},
{file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f"},
{file = "contourpy-1.3.3-cp314-cp314t-win32.whl", hash = "sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae"},
{file = "contourpy-1.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc"},
{file = "contourpy-1.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b"},
{file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497"},
{file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8"},
{file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e"},
{file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989"},
{file = "contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77"},
{file = "contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880"},
]
[package.dependencies]
numpy = ">=1.25"
[package.extras]
bokeh = ["bokeh", "selenium"]
docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
mypy = ["bokeh", "contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.17.0)", "types-Pillow"]
test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"]
[[package]]
name = "cycler"
version = "0.12.1"
description = "Composable style cycles"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
{file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
]
[package.extras]
docs = ["ipython", "matplotlib", "numpydoc", "sphinx"]
tests = ["pytest", "pytest-cov", "pytest-xdist"]
[[package]] [[package]]
name = "dill" name = "dill"
@ -469,6 +656,79 @@ mccabe = ">=0.7.0,<0.8.0"
pycodestyle = ">=2.14.0,<2.15.0" pycodestyle = ">=2.14.0,<2.15.0"
pyflakes = ">=3.4.0,<3.5.0" pyflakes = ">=3.4.0,<3.5.0"
[[package]]
name = "fonttools"
version = "4.61.1"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"},
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"},
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"},
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"},
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"},
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"},
{file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"},
{file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"},
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"},
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"},
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"},
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"},
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"},
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"},
{file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"},
{file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"},
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"},
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"},
{file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"},
{file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"},
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"},
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"},
{file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"},
{file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"},
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"},
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"},
{file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"},
{file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"},
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"},
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"},
{file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"},
{file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"},
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"},
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"},
{file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"},
{file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"},
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"},
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"},
{file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"},
{file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"},
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"},
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"},
{file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"},
{file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"},
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"},
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"},
{file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"},
{file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"},
{file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"},
{file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"},
]
[package.extras]
all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.45.0)", "unicodedata2 (>=17.0.0) ; python_version <= \"3.14\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
lxml = ["lxml (>=4.0)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
repacker = ["uharfbuzz (>=0.45.0)"]
symfont = ["sympy"]
type1 = ["xattr ; sys_platform == \"darwin\""]
unicode = ["unicodedata2 (>=17.0.0) ; python_version <= \"3.14\""]
woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
[[package]] [[package]]
name = "h11" name = "h11"
version = "0.16.0" version = "0.16.0"
@ -629,6 +889,117 @@ files = [
[package.dependencies] [package.dependencies]
referencing = ">=0.31.0" referencing = ">=0.31.0"
[[package]]
name = "kiwisolver"
version = "1.4.9"
description = "A fast implementation of the Cassowary constraint solver"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b4b4d74bda2b8ebf4da5bd42af11d02d04428b2c32846e4c2c93219df8a7987b"},
{file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fb3b8132019ea572f4611d770991000d7f58127560c4889729248eb5852a102f"},
{file = "kiwisolver-1.4.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84fd60810829c27ae375114cd379da1fa65e6918e1da405f356a775d49a62bcf"},
{file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b78efa4c6e804ecdf727e580dbb9cba85624d2e1c6b5cb059c66290063bd99a9"},
{file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4efec7bcf21671db6a3294ff301d2fc861c31faa3c8740d1a94689234d1b415"},
{file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:90f47e70293fc3688b71271100a1a5453aa9944a81d27ff779c108372cf5567b"},
{file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fdca1def57a2e88ef339de1737a1449d6dbf5fab184c54a1fca01d541317154"},
{file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cf554f21be770f5111a1690d42313e140355e687e05cf82cb23d0a721a64a48"},
{file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fc1795ac5cd0510207482c3d1d3ed781143383b8cfd36f5c645f3897ce066220"},
{file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ccd09f20ccdbbd341b21a67ab50a119b64a403b09288c27481575105283c1586"},
{file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:540c7c72324d864406a009d72f5d6856f49693db95d1fbb46cf86febef873634"},
{file = "kiwisolver-1.4.9-cp310-cp310-win_amd64.whl", hash = "sha256:ede8c6d533bc6601a47ad4046080d36b8fc99f81e6f1c17b0ac3c2dc91ac7611"},
{file = "kiwisolver-1.4.9-cp310-cp310-win_arm64.whl", hash = "sha256:7b4da0d01ac866a57dd61ac258c5607b4cd677f63abaec7b148354d2b2cdd536"},
{file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16"},
{file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089"},
{file = "kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543"},
{file = "kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61"},
{file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1"},
{file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872"},
{file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26"},
{file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028"},
{file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771"},
{file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a"},
{file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464"},
{file = "kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2"},
{file = "kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7"},
{file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999"},
{file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2"},
{file = "kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14"},
{file = "kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04"},
{file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752"},
{file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77"},
{file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198"},
{file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d"},
{file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab"},
{file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2"},
{file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145"},
{file = "kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54"},
{file = "kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60"},
{file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8"},
{file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2"},
{file = "kiwisolver-1.4.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f"},
{file = "kiwisolver-1.4.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098"},
{file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed"},
{file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525"},
{file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78"},
{file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b"},
{file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799"},
{file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3"},
{file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c"},
{file = "kiwisolver-1.4.9-cp313-cp313-win_amd64.whl", hash = "sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d"},
{file = "kiwisolver-1.4.9-cp313-cp313-win_arm64.whl", hash = "sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2"},
{file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1"},
{file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1"},
{file = "kiwisolver-1.4.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11"},
{file = "kiwisolver-1.4.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c"},
{file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197"},
{file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c"},
{file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185"},
{file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748"},
{file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64"},
{file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff"},
{file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07"},
{file = "kiwisolver-1.4.9-cp313-cp313t-win_arm64.whl", hash = "sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c"},
{file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386"},
{file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552"},
{file = "kiwisolver-1.4.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3"},
{file = "kiwisolver-1.4.9-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58"},
{file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4"},
{file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df"},
{file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6"},
{file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5"},
{file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf"},
{file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5"},
{file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce"},
{file = "kiwisolver-1.4.9-cp314-cp314-win_amd64.whl", hash = "sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7"},
{file = "kiwisolver-1.4.9-cp314-cp314-win_arm64.whl", hash = "sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c"},
{file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548"},
{file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d"},
{file = "kiwisolver-1.4.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c"},
{file = "kiwisolver-1.4.9-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122"},
{file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64"},
{file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134"},
{file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370"},
{file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21"},
{file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a"},
{file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f"},
{file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369"},
{file = "kiwisolver-1.4.9-cp314-cp314t-win_amd64.whl", hash = "sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891"},
{file = "kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32"},
{file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d1d9e582ad4d63062d34077a9a1e9f3c34088a2ec5135b1f7190c07cf366527"},
{file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:deed0c7258ceb4c44ad5ec7d9918f9f14fd05b2be86378d86cf50e63d1e7b771"},
{file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a590506f303f512dff6b7f75fd2fd18e16943efee932008fe7140e5fa91d80e"},
{file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e09c2279a4d01f099f52d5c4b3d9e208e91edcbd1a175c9662a8b16e000fece9"},
{file = "kiwisolver-1.4.9-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c9e7cdf45d594ee04d5be1b24dd9d49f3d1590959b2271fb30b5ca2b262c00fb"},
{file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5"},
{file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa"},
{file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2"},
{file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f"},
{file = "kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1"},
{file = "kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d"},
]
[[package]] [[package]]
name = "markupsafe" name = "markupsafe"
version = "3.0.2" version = "3.0.2"
@ -700,6 +1071,85 @@ files = [
{file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
] ]
[[package]]
name = "matplotlib"
version = "3.10.8"
description = "Python plotting package"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "matplotlib-3.10.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:00270d217d6b20d14b584c521f810d60c5c78406dc289859776550df837dcda7"},
{file = "matplotlib-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b3c1cc42aa184b3f738cfa18c1c1d72fd496d85467a6cf7b807936d39aa656"},
{file = "matplotlib-3.10.8-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ee40c27c795bda6a5292e9cff9890189d32f7e3a0bf04e0e3c9430c4a00c37df"},
{file = "matplotlib-3.10.8-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a48f2b74020919552ea25d222d5cc6af9ca3f4eb43a93e14d068457f545c2a17"},
{file = "matplotlib-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f254d118d14a7f99d616271d6c3c27922c092dac11112670b157798b89bf4933"},
{file = "matplotlib-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9b587c9c7274c1613a30afabf65a272114cd6cdbe67b3406f818c79d7ab2e2a"},
{file = "matplotlib-3.10.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6be43b667360fef5c754dda5d25a32e6307a03c204f3c0fc5468b78fa87b4160"},
{file = "matplotlib-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2b336e2d91a3d7006864e0990c83b216fcdca64b5a6484912902cef87313d78"},
{file = "matplotlib-3.10.8-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:efb30e3baaea72ce5928e32bab719ab4770099079d66726a62b11b1ef7273be4"},
{file = "matplotlib-3.10.8-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d56a1efd5bfd61486c8bc968fa18734464556f0fb8e51690f4ac25d85cbbbbc2"},
{file = "matplotlib-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:238b7ce5717600615c895050239ec955d91f321c209dd110db988500558e70d6"},
{file = "matplotlib-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:18821ace09c763ec93aef5eeff087ee493a24051936d7b9ebcad9662f66501f9"},
{file = "matplotlib-3.10.8-cp311-cp311-win_arm64.whl", hash = "sha256:bab485bcf8b1c7d2060b4fcb6fc368a9e6f4cd754c9c2fea281f4be21df394a2"},
{file = "matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a"},
{file = "matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58"},
{file = "matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04"},
{file = "matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f"},
{file = "matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466"},
{file = "matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf"},
{file = "matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b"},
{file = "matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6"},
{file = "matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1"},
{file = "matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486"},
{file = "matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce"},
{file = "matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6"},
{file = "matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149"},
{file = "matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645"},
{file = "matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077"},
{file = "matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22"},
{file = "matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39"},
{file = "matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565"},
{file = "matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a"},
{file = "matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958"},
{file = "matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5"},
{file = "matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f"},
{file = "matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b"},
{file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d"},
{file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008"},
{file = "matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c"},
{file = "matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = "sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11"},
{file = "matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8"},
{file = "matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50"},
{file = "matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908"},
{file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a"},
{file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1"},
{file = "matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c"},
{file = "matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b"},
{file = "matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f"},
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f97aeb209c3d2511443f8797e3e5a569aebb040d4f8bc79aa3ee78a8fb9e3dd8"},
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fb061f596dad3a0f52b60dc6a5dec4a0c300dec41e058a7efe09256188d170b7"},
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12d90df9183093fcd479f4172ac26b322b1248b15729cb57f42f71f24c7e37a3"},
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6da7c2ce169267d0d066adcf63758f0604aa6c3eebf67458930f9d9b79ad1db1"},
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9153c3292705be9f9c64498a8872118540c3f4123d1a1c840172edf262c8be4a"},
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2"},
{file = "matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3"},
]
[package.dependencies]
contourpy = ">=1.0.1"
cycler = ">=0.10"
fonttools = ">=4.22.0"
kiwisolver = ">=1.3.1"
numpy = ">=1.23"
packaging = ">=20.0"
pillow = ">=8"
pyparsing = ">=3"
python-dateutil = ">=2.7"
[package.extras]
dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"]
[[package]] [[package]]
name = "mccabe" name = "mccabe"
version = "0.7.0" version = "0.7.0"
@ -906,6 +1356,115 @@ files = [
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
] ]
[[package]]
name = "pillow"
version = "12.0.0"
description = "Python Imaging Library (fork)"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "pillow-12.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b"},
{file = "pillow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1"},
{file = "pillow-12.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363"},
{file = "pillow-12.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca"},
{file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e"},
{file = "pillow-12.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782"},
{file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10"},
{file = "pillow-12.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa"},
{file = "pillow-12.0.0-cp310-cp310-win32.whl", hash = "sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275"},
{file = "pillow-12.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d"},
{file = "pillow-12.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7"},
{file = "pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc"},
{file = "pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257"},
{file = "pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642"},
{file = "pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3"},
{file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c"},
{file = "pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227"},
{file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b"},
{file = "pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e"},
{file = "pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739"},
{file = "pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e"},
{file = "pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d"},
{file = "pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371"},
{file = "pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082"},
{file = "pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f"},
{file = "pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d"},
{file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953"},
{file = "pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8"},
{file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79"},
{file = "pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba"},
{file = "pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0"},
{file = "pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a"},
{file = "pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4"},
{file = "pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399"},
{file = "pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5"},
{file = "pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b"},
{file = "pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3"},
{file = "pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07"},
{file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e"},
{file = "pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344"},
{file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27"},
{file = "pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79"},
{file = "pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098"},
{file = "pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905"},
{file = "pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a"},
{file = "pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3"},
{file = "pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a"},
{file = "pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe"},
{file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee"},
{file = "pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef"},
{file = "pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9"},
{file = "pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b"},
{file = "pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2"},
{file = "pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a"},
{file = "pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b"},
{file = "pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad"},
{file = "pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01"},
{file = "pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c"},
{file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e"},
{file = "pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e"},
{file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9"},
{file = "pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab"},
{file = "pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b"},
{file = "pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b"},
{file = "pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0"},
{file = "pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6"},
{file = "pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca"},
{file = "pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925"},
{file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8"},
{file = "pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4"},
{file = "pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52"},
{file = "pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a"},
{file = "pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76"},
{file = "pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5"},
{file = "pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
fpx = ["olefile"]
mic = ["olefile"]
test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"]
tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"]
xmp = ["defusedxml"]
[[package]] [[package]]
name = "platformdirs" name = "platformdirs"
version = "4.4.0" version = "4.4.0"
@ -1045,6 +1604,21 @@ tomlkit = ">=0.10.1"
spelling = ["pyenchant (>=3.2,<4.0)"] spelling = ["pyenchant (>=3.2,<4.0)"]
testutils = ["gitpython (>3)"] testutils = ["gitpython (>3)"]
[[package]]
name = "pyparsing"
version = "3.2.5"
description = "pyparsing - Classes and methods to define and execute parsing grammars"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pyparsing-3.2.5-py3-none-any.whl", hash = "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e"},
{file = "pyparsing-3.2.5.tar.gz", hash = "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6"},
]
[package.extras]
diagrams = ["jinja2", "railroad-diagrams"]
[[package]] [[package]]
name = "pyproject-api" name = "pyproject-api"
version = "1.9.1" version = "1.9.1"
@ -1116,6 +1690,89 @@ files = [
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
] ]
[[package]]
name = "pyyaml"
version = "6.0.3"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"},
{file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"},
{file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"},
{file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"},
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"},
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198"},
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b"},
{file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0"},
{file = "pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69"},
{file = "pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e"},
{file = "pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c"},
{file = "pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e"},
{file = "pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824"},
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c"},
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00"},
{file = "pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d"},
{file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a"},
{file = "pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4"},
{file = "pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b"},
{file = "pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf"},
{file = "pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196"},
{file = "pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0"},
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28"},
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c"},
{file = "pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc"},
{file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e"},
{file = "pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea"},
{file = "pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5"},
{file = "pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b"},
{file = "pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd"},
{file = "pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8"},
{file = "pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1"},
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c"},
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5"},
{file = "pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6"},
{file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6"},
{file = "pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be"},
{file = "pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26"},
{file = "pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c"},
{file = "pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb"},
{file = "pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac"},
{file = "pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310"},
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7"},
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788"},
{file = "pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5"},
{file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764"},
{file = "pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35"},
{file = "pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac"},
{file = "pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3"},
{file = "pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3"},
{file = "pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba"},
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c"},
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702"},
{file = "pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c"},
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065"},
{file = "pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65"},
{file = "pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9"},
{file = "pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b"},
{file = "pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da"},
{file = "pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917"},
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9"},
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5"},
{file = "pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a"},
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926"},
{file = "pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7"},
{file = "pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0"},
{file = "pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007"},
{file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"},
]
[[package]] [[package]]
name = "pyzmq" name = "pyzmq"
version = "27.1.0" version = "27.1.0"
@ -2136,4 +2793,4 @@ files = [
[metadata] [metadata]
lock-version = "2.1" lock-version = "2.1"
python-versions = ">=3.10" python-versions = ">=3.10"
content-hash = "546dd85a2ad750359310ff22acfe7bfd3ca764f025d19e3fd48a50cd431e64e5" content-hash = "561f5c2944eccf993252e21d130ed541e8b409ee702ff08281e8da715228fcac"

View File

@ -12,6 +12,7 @@ maintainers = [
{ name = "Benjamin Chinnery", email = "ben@qoherent.ai" }, { name = "Benjamin Chinnery", email = "ben@qoherent.ai" },
{ name = "Ashkan Beigi", email = "ash@qoherent.ai" }, { name = "Ashkan Beigi", email = "ash@qoherent.ai" },
{ name = "Madrigal Weersink", email = "madrigal@qoherent.ai" }, { name = "Madrigal Weersink", email = "madrigal@qoherent.ai" },
{ name = "Gillian Ford", email = "gillian@qoherent.ai" }
] ]
keywords = [ keywords = [
"radio", "radio",
@ -46,6 +47,9 @@ dependencies = [
"h5py (>=3.14.0,<4.0.0)", "h5py (>=3.14.0,<4.0.0)",
"pandas (>=2.3.2,<3.0.0)", "pandas (>=2.3.2,<3.0.0)",
"pyzmq (>=27.1.0,<28.0.0)", "pyzmq (>=27.1.0,<28.0.0)",
"pyyaml (>=6.0.3,<7.0.0)",
"click (>=8.1.0,<9.0.0)",
"matplotlib (>=3.8.0,<4.0.0)"
] ]
# [project.optional-dependencies] Commented out to prevent Tox tests from failing # [project.optional-dependencies] Commented out to prevent Tox tests from failing
@ -67,7 +71,8 @@ all-sdr = [
[tool.poetry] [tool.poetry]
packages = [ packages = [
{ include = "ria_toolkit_oss", from = "src" } { include = "ria_toolkit_oss", from = "src" },
{ include = "ria_toolkit_oss_cli", from = "src" }
] ]
include = [ include = [
"**/*.so", # Required for Nuitkaification "**/*.so", # Required for Nuitkaification
@ -97,6 +102,10 @@ pylint = "^3.2.6" # For pyreverse, to automate the creation of UML diagrams
"Source" = "https://riahub.ai/qoherent/ria-toolkit-oss" "Source" = "https://riahub.ai/qoherent/ria-toolkit-oss"
"Issues Board" = "https://riahub.ai/qoherent/ria-toolkit-oss/issues" "Issues Board" = "https://riahub.ai/qoherent/ria-toolkit-oss/issues"
[tool.poetry.scripts]
ria = "ria_toolkit_oss_cli.cli:cli"
ria-tools = "ria_toolkit_oss_cli.cli:cli"
[tool.black] [tool.black]
line-length = 119 line-length = 119
target-version = ["py310"] target-version = ["py310"]

0
src/__init__.py Normal file
View File

View File

@ -559,6 +559,102 @@ class Recording:
to_npy(recording=self, filename=filename, path=path, overwrite=overwrite) to_npy(recording=self, filename=filename, path=path, overwrite=overwrite)
def to_wav(
    self,
    filename: Optional[str] = None,
    path: Optional[os.PathLike | str] = None,
    target_sample_rate: Optional[int] = 48000,
    bits_per_sample: int = 32,
    overwrite: bool = False,
) -> str:
    """Save this recording as a WAV file with embedded YAML metadata.

    The IQ data is written as stereo audio: the left channel carries the
    in-phase (I) component and the right channel the quadrature (Q) component.
    RF-specific metadata is serialized as YAML into the ICMT (comment) field of
    a standard LIST INFO chunk, keeping it human-readable.

    :param filename: Name of the file where the recording is to be saved. Defaults to an auto-generated filename.
    :type filename: os.PathLike or str, optional
    :param path: Directory path where the recording is to be saved. Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param target_sample_rate: Sample rate stored in the WAV header when no sample_rate metadata
        is present. IQ samples are written without decimation or interpolation. Default is 48000 Hz.
    :type target_sample_rate: int, optional
    :param bits_per_sample: Bits per sample (32 for float32, 16 for int16). Default is 32.
    :type bits_per_sample: int, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional

    :raises IOError: If there is an issue encountered during the file writing process.

    :return: Path where the file was saved.
    :rtype: str

    **Examples:**

    Create a recording and save it to a .wav file:

    >>> import numpy
    >>> from utils.data import Recording
    >>> samples = numpy.exp(1j * 2 * numpy.pi * 0.1 * numpy.arange(10000))
    >>> metadata = {"sample_rate": 1e6, "center_frequency": 915e6}
    >>> recording = Recording(data=samples, metadata=metadata)
    >>> recording.to_wav()
    """
    # Deferred import — presumably avoids a circular dependency between the
    # data and io modules; TODO confirm against the package layout.
    from utils.io.recording import to_wav as _write_wav

    return _write_wav(
        recording=self,
        filename=filename,
        path=path,
        target_sample_rate=target_sample_rate,
        bits_per_sample=bits_per_sample,
        overwrite=overwrite,
    )
def to_blue(
    self,
    filename: Optional[str] = None,
    path: Optional[os.PathLike | str] = None,
    data_format: str = "CI",
    overwrite: bool = False,
) -> str:
    """Serialize this recording in the MIDAS Blue file format.

    MIDAS Blue is a legacy RF file format with a 512-byte binary header,
    commonly used with X-Midas and other RF/radar signal processing tools.

    :param filename: The name of the file where the recording is to be saved.
        Defaults to an auto-generated filename.
    :type filename: os.PathLike or str, optional
    :param path: The directory path to where the recording is to be saved.
        Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param data_format: Format code (default 'CI' = complex int16).
        Common formats: 'CI' (complex int16), 'CF' (complex float32),
        'CD' (complex float64). Integer formats require the IQ samples to
        already be scaled within [-1, 1).
    :type data_format: str, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional
    :raises IOError: If there is an issue encountered during the file writing process.
    :return: Path where the file was saved.
    :rtype: str

    **Examples:**

    Create a recording and save it to a .blue file:

    >>> import numpy
    >>> from utils.data import Recording
    >>> samples = numpy.ones(10000, dtype=numpy.complex64)
    >>> metadata = {"sample_rate": 1e6, "center_frequency": 2.44e9}
    >>> recording = Recording(data=samples, metadata=metadata)
    >>> recording.to_blue()
    """
    # Imported at call time rather than module scope -- presumably to avoid a
    # circular import between the datatypes and io modules (TODO confirm).
    from utils.io.recording import to_blue

    return to_blue(
        recording=self,
        filename=filename,
        path=path,
        data_format=data_format,
        overwrite=overwrite,
    )
def trim(self, num_samples: int, start_sample: Optional[int] = 0) -> Recording: def trim(self, num_samples: int, start_sample: Optional[int] = 0) -> Recording:
"""Trim Recording samples to a desired length, shifting annotations to maintain alignment. """Trim Recording samples to a desired length, shifting annotations to maintain alignment.

View File

@ -2,3 +2,37 @@
The IO package contains utilities for input and output operations, such as loading and saving recordings to and from The IO package contains utilities for input and output operations, such as loading and saving recordings to and from
file. file.
""" """
__all__ = [
# Common:
"exists",
"copy",
"move",
"validate",
# Recording:
"save_recording",
"load_recording",
"to_sigmf",
"from_sigmf",
"to_npy",
"from_npy",
"from_npy_legacy",
"to_wav",
"from_wav",
"to_blue",
"from_blue",
]
from .common import copy, exists, move, validate
from .recording import (
from_blue,
from_npy,
from_npy_legacy,
from_sigmf,
from_wav,
load_recording,
to_blue,
to_npy,
to_sigmf,
to_wav,
)

View File

@ -0,0 +1,83 @@
"""
Utilities for common input/output operations.
"""
import os
import ria_toolkit_oss
def exists(fid: str | os.PathLike) -> bool:
"""Check if the file or directory exists.
.. todo::
This method is not yet implemented.
:param fid: The path to the file or directory to check for existence.
:type fid: str or os.PathLike
:return: True if the file or directory exists, False otherwise.
:rtype: bool
"""
raise NotImplementedError
def validate(fid: str | os.PathLike) -> bool:
"""Validate the contents of the file or directory to ensure it is not corrupted,
the correct format for its extension, and readable RIA.
.. todo::
This method is not yet implemented.
:param fid: The path to the file or directory to validate.
:type fid: str or os.PathLike
:return: True if the file or directory is valid and readable, False otherwise.
"""
raise NotImplementedError
def move(source_path: str | os.PathLike, destination_path: str | os.PathLike, copy: bool = False) -> None:
"""Recursively move a file or directory at source_path to destination_path.
.. todo::
This method is not yet implemented.
:param source_path: The path to the source file or directory.
:type source_path: str or os.PathLike
:param destination_path: The path to the destination directory.
:type destination_path: str or os.PathLike
:param copy: If True, perform a copy instead of a move. Default is False.
:type copy: bool, optional
:raises RuntimeError: If the move was unsuccessful.
:return: None
"""
if copy:
ria_toolkit_oss.io.common.copy(source_path=source_path, destination_path=destination_path)
return
raise NotImplementedError
def copy(source_path: str | os.PathLike, destination_path: str | os.PathLike) -> None:
"""Copy the file or directory at source_path to destination_path.
.. todo::
This function is not yet implemented.
:param source_path: The path to the source file or directory.
:type source_path: str or os.PathLike
:param destination_path: The path to the destination directory.
:type destination_path: str or os.PathLike
:raises RuntimeError: If the copy was unsuccessful.
:return: None
"""
raise NotImplementedError

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,6 @@ It streamlines tasks involving signal reception and transmission, as well as com
operations such as detecting and configuring available devices. operations such as detecting and configuring available devices.
""" """
__all__ = ["SDR"] __all__ = ["SDR", "SDRError", "SDRParameterError"]
from .sdr import SDR from .sdr import SDR, SDRError, SDRParameterError

View File

@ -0,0 +1,7 @@
"""
The Signal Package provides a comprehensive suite of tools for signal generation and processing.
"""
from .recordable import Recordable
__all__ = ["Recordable"]

View File

@ -0,0 +1,398 @@
"""
.. todo:: Need to add some information here about signal generation and the signal generators in this module.
"""
from typing import Optional
import numpy as np
import scipy.signal
from scipy.signal import butter
from scipy.signal import chirp as sci_chirp
from scipy.signal import hilbert, lfilter
from ria_toolkit_oss.datatypes.recording import Recording
def sine(
    sample_rate: Optional[int] = 1000,
    length: Optional[int] = 1000,
    frequency: Optional[float] = 1000,
    amplitude: Optional[float] = 1,
    baseband_phase: Optional[float] = 0,
    rf_phase: Optional[float] = 0,
    dc_offset: Optional[float] = 0,
) -> Recording:
    """Generate a real sine wave and wrap it in a complex-valued Recording.

    :param sample_rate: The number of samples per second (Hz). Defaults to 1,000.
    :type sample_rate: int, optional
    :param length: Number of samples in the recording. Defaults to 1,000.
    :type length: int, optional
    :param frequency: The frequency of the sine wave (Hz). Defaults to 1,000.
    :type frequency: float, optional
    :param amplitude: Amplitude of the sine wave. Defaults to 1.
    :type amplitude: float, optional
    :param baseband_phase: Phase offset in radians, relative to the sine wave
        frequency. Defaults to 0.
    :type baseband_phase: float, optional
    :param rf_phase: Phase offset in radians of the complex samples. Defaults to 0.
    :type rf_phase: float, optional
    :param dc_offset: DC offset (average of the sine wave). Defaults to 0.
    :type dc_offset: float, optional
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording object containing the generated sine wave signal.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")

    # Uniform time grid covering length samples at the requested rate.
    duration = length / sample_rate
    timeline = np.linspace(0, duration, length, endpoint=False)

    # Real baseband waveform, then a constant complex rotation by rf_phase.
    baseband = amplitude * np.sin(2 * np.pi * frequency * timeline + baseband_phase) + dc_offset
    iq_samples = baseband * np.exp(1j * rf_phase)

    metadata = dict(
        signal="sine",
        source="synth",
        sample_rate=sample_rate,
        length=length,
        signal_frequency=frequency,
        amplitude=amplitude,
        baseband_phase=baseband_phase,
        rf_phase=rf_phase,
        dc_offset=dc_offset,
    )
    return Recording(data=iq_samples, metadata=metadata)
def square(
    sample_rate: Optional[int] = 1000,
    length: Optional[int] = 1000,
    frequency: Optional[float] = 1,
    amplitude: Optional[float] = 1,
    duty_cycle: Optional[float] = 0.5,
    baseband_phase: Optional[float] = 0,
    rf_phase: Optional[float] = 0,
    dc_offset: Optional[float] = 0,
) -> Recording:
    """Generate a square wave signal.

    :param sample_rate: The number of samples per second (Hz). Defaults to 1,000.
    :type sample_rate: int, optional
    :param length: Number of samples in the recording. Defaults to 1,000.
    :type length: int, optional
    :param frequency: The frequency of the square wave (Hz). Defaults to 1.
    :type frequency: float, optional
    :param amplitude: The amplitude of the square wave. Defaults to 1.
    :type amplitude: float, optional
    :param duty_cycle: The duty cycle of the square wave as a decimal in the
        range [0, 1]. Defaults to 0.5.
    :type duty_cycle: float, optional
    :param baseband_phase: Phase offset in radians, relative to the square wave
        frequency. Defaults to 0.
    :type baseband_phase: float, optional
    :param rf_phase: Phase offset in radians of the complex samples. Defaults to 0.
    :type rf_phase: float, optional
    :param dc_offset: DC offset. If dc_offset is 0 but duty_cycle is not 0.5,
        the actual dc offset may not be exactly 0. Defaults to 0.
    :type dc_offset: float, optional
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording object containing the generated square wave signal.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")
    t = np.arange(length)
    # Add baseband_phase directly as a phase offset in radians, matching the
    # convention used by sine(). Previously the offset was subtracted and
    # scaled by the wave frequency (2*pi*f*(t/fs - phase/(2*pi))), which made
    # the effective phase shift frequency-dependent, contradicting the
    # documented "phase offset in radians" contract.
    square_wave = amplitude * scipy.signal.square(
        2 * np.pi * frequency * t / sample_rate + baseband_phase, duty=duty_cycle
    )
    square_wave = square_wave + dc_offset
    complex_square_wave = square_wave * np.exp(1j * rf_phase)
    metadata = {
        "signal": "square",
        "source": "synth",
        "sample_rate": sample_rate,
        "length": length,
        "signal_frequency": frequency,
        "amplitude": amplitude,
        "baseband_phase": baseband_phase,
        "duty_cycle": duty_cycle,
        "rf_phase": rf_phase,
        "dc_offset": dc_offset,
    }
    return Recording(data=complex_square_wave, metadata=metadata)
def sawtooth(
    sample_rate: Optional[int] = 1000,
    length: Optional[int] = 1000,
    frequency: Optional[float] = 1,
    amplitude: Optional[float] = 1,
    baseband_phase: Optional[float] = 0,
    rf_phase: Optional[float] = 0,
    dc_offset: Optional[float] = 0,
) -> Recording:
    """Generate a sawtooth wave signal.

    :param sample_rate: The number of samples per second (Hz). Defaults to 1,000.
    :type sample_rate: int, optional
    :param length: Number of samples in the recording. Defaults to 1,000.
    :type length: int, optional
    :param frequency: The frequency of the sawtooth wave (Hz). Defaults to 1.
    :type frequency: float, optional
    :param amplitude: Amplitude of the sawtooth wave. Defaults to 1.
    :type amplitude: float, optional
    :param baseband_phase: Phase offset in radians, relative to the wave
        frequency. Defaults to 0.
    :type baseband_phase: float, optional
    :param rf_phase: Phase offset in radians of the complex samples. Defaults to 0.
    :type rf_phase: float, optional
    :param dc_offset: DC offset (average of the wave). Defaults to 0.
    :type dc_offset: float, optional
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording object containing the generated sawtooth signal.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")
    t = np.arange(length)
    # Add baseband_phase directly as a phase offset in radians, matching the
    # convention used by sine(). Previously the offset was subtracted and
    # scaled by the wave frequency, making the shift frequency-dependent.
    saw_wave = amplitude * scipy.signal.sawtooth(
        2 * np.pi * frequency * t / sample_rate + baseband_phase
    )
    saw_wave = saw_wave + dc_offset
    complex_saw_wave = saw_wave * np.exp(1j * rf_phase)
    metadata = {
        "signal": "sawtooth",
        "source": "synth",
        "sample_rate": sample_rate,
        "length": length,
        "signal_frequency": frequency,
        "amplitude": amplitude,
        "baseband_phase": baseband_phase,
        "rf_phase": rf_phase,
        "dc_offset": dc_offset,
    }
    return Recording(data=complex_saw_wave, metadata=metadata)
def noise(
    sample_rate: Optional[int] = 1000,
    length: Optional[int] = 1000,
    rms_power: Optional[float] = 0.2,
    dc_offset: Optional[float] = 0,
) -> Recording:
    """Generate a Gaussian white noise (GWN) wave signal.

    The magnitude is drawn from a zero-mean normal distribution with standard
    deviation rms_power, clipped to [-1, 1], and combined with a uniformly
    random phase.

    :param sample_rate: The number of samples per second (Hz). Defaults to 1,000.
    :type sample_rate: int, optional
    :param length: Number of samples in the recording. Defaults to 1,000.
    :type length: int, optional
    :param rms_power: Root-Mean-Square power of the generated signal. Defaults to 0.2.
    :type rms_power: float, optional
    :param dc_offset: DC offset (average of the wave). Defaults to 0.
    :type dc_offset: float, optional
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording object containing the generated noise signal.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")

    import warnings

    variance = rms_power**2
    magnitude = np.random.normal(loc=0, scale=np.sqrt(variance), size=length)
    clipped = np.clip(magnitude, -1, 1)
    # TODO: find a better way to confine the magnitude to [-1, 1] than
    # clipping, which biases the realized RMS power downward.
    if not np.array_equal(magnitude, clipped):
        # Use the warnings machinery instead of print so callers can filter,
        # capture, or escalate the condition.
        warnings.warn("Clipping occurred in basic_signal_generator.noise; realized RMS power is reduced.")
    phase = np.random.uniform(low=0, high=2 * np.pi, size=length)
    complex_awgn = clipped * np.exp(1j * phase)
    complex_awgn = complex_awgn + dc_offset
    metadata = {
        "signal": "awgn",
        "source": "synth",
        "sample_rate": sample_rate,
        "length": length,
        "amplitude": np.max(np.abs(complex_awgn)),
        "dc_offset": dc_offset,
    }
    return Recording(data=complex_awgn, metadata=metadata)
def chirp(sample_rate: int, num_samples: int, center_frequency: Optional[float] = 0) -> Recording:
    """Generate a sinusoidal waveform with a linear frequency sweep.

    Start and end frequencies are chosen based on the maximum frequency range
    that can be covered without aliasing, which is determined by the sample
    rate. To chirp over a larger frequency range, increase the sample rate.

    Chirps are often used in radar, sonar, and communication systems because
    they can effectively cover a wide frequency range and are useful for
    testing and measurement purposes.

    :param sample_rate: The number of samples per second (Hz).
    :type sample_rate: int
    :param num_samples: The number of samples in the chirp.
    :type num_samples: int
    :param center_frequency: The center frequency of the chirp.
    :type center_frequency: float, optional
    :raises ValueError: If num_samples is less than 2.
    :return: A Recording object containing the generated chirp signal.
    :rtype: Recording
    """
    if num_samples < 2:
        # t[-1] below is the sweep duration; with fewer than two samples it is
        # zero and the sweep is undefined (0/0).
        raise ValueError("num_samples must be at least 2")

    # Sweep over +/- sample_rate/4 around the center so the instantaneous
    # frequency stays within a safe range and avoids aliasing.
    chirp_start_frequency = center_frequency - sample_rate / 4
    chirp_end_frequency = center_frequency + sample_rate / 4

    t = np.arange(num_samples) / int(sample_rate)
    f_t = chirp_start_frequency + (chirp_end_frequency - chirp_start_frequency) * t / t[-1]
    complex_samples = np.exp(2.0j * np.pi * f_t * t)

    # Fixed metadata key: was "num_samples:" (stray colon inside the key
    # string); also record the center frequency used for the sweep.
    metadata = {"sample_rate": sample_rate, "num_samples": num_samples, "center_frequency": center_frequency}
    return Recording(data=complex_samples, metadata=metadata)
def lfm_chirp_complex(
    sample_rate: int, width: int, chirp_period: float, sigfc: int | float, total_time: float, chirp_type: str
):
    """
    Generate a complex linearly frequency modulated (LFM) chirp signal.

    One chirp of duration chirp_period is generated at baseband, tiled to
    cover total_time, converted to an analytic signal, shifted to the signal
    center frequency, and low-pass filtered.

    :param sample_rate: The number of samples per second (Hz).
    :type sample_rate: int
    :param width: Sweep bandwidth of the chirp (Hz).
    :type width: int
    :param chirp_period: Duration of a single chirp (seconds).
    :type chirp_period: float
    :param sigfc: Center frequency of the chirp (Hz).
    :type sigfc: int or float
    :param total_time: Total duration of the generated signal (seconds).
    :type total_time: float
    :param chirp_type: Sweep direction: 'up', 'down', or 'up_down'.
    :type chirp_type: str
    :raises ValueError: If chirp_type is not one of 'up', 'down', or 'up_down'.
    :return: A Recording containing the filtered complex chirp.
    :rtype: Recording
    """
    # Time vector for one chirp.
    chirp_length = int(chirp_period * sample_rate)
    t_chirp = np.linspace(0, chirp_period, chirp_length)
    if len(t_chirp) > chirp_length:
        t_chirp = t_chirp[:chirp_length]

    # Generate one chirp from 0 Hz to the full width.
    if chirp_type == "up":
        baseband_chirp = sci_chirp(t_chirp, f0=0, f1=width, t1=chirp_period, method="linear")
    elif chirp_type == "down":
        baseband_chirp = sci_chirp(t_chirp, f0=width, f1=0, t1=chirp_period, method="linear")
    elif chirp_type == "up_down":
        half_duration = chirp_period / 2
        t_up_half = np.array_split(t_chirp, 2)[0]
        up_part = sci_chirp(t_up_half, f0=0, t1=half_duration, f1=width, method="linear")
        # The down-sweep is the time-reversed up-sweep.
        down_part = np.flip(up_part)
        baseband_chirp = np.concatenate([up_part, down_part])
    else:
        # Previously an unknown chirp_type fell through and raised a confusing
        # NameError on baseband_chirp; fail fast with a clear message instead.
        raise ValueError(f"chirp_type must be 'up', 'down', or 'up_down', got {chirp_type!r}")

    # Generate the full signal by tiling the single chirp.
    num_chirps = round(total_time / chirp_period)
    full_signal = np.tile(baseband_chirp, num_chirps)

    # Create an analytic signal (complex with no negative frequency components).
    analytic_signal = hilbert(full_signal)

    # Shift the chirp to the signal center frequency.
    t_full = np.linspace(0, total_time, len(analytic_signal))
    complex_chirp = analytic_signal * np.exp(1j * 2 * np.pi * (sigfc - width / 2) * t_full)

    # 8th-order Butterworth low-pass to suppress out-of-band products.
    nyquist = 0.5 * sample_rate  # Nyquist frequency
    normal_cutoff = width / nyquist  # Normalize cutoff
    b, a = butter(8, normal_cutoff, btype="low", analog=False)
    filtered_chirp = lfilter(b, a, complex_chirp)

    metadata = {
        "source": "basic_signal_generator",
        "sample_rate": sample_rate,
        "width": width,
        "chirp_period": chirp_period,
        "chirp_center_frequency": sigfc,
        "total_time": total_time,
        "filter": "low_pass",
    }
    return Recording(data=filtered_chirp, metadata=metadata)
def complex_sine(sample_rate, length, frequency):
    """
    Generate a complex exponential tone at a randomly drawn power level.

    The amplitude is 10**p where p is drawn uniformly from [-8, 0].

    :param sample_rate: The number of samples per second (Hz).
    :type sample_rate: int
    :param length: Number of samples in the recording.
    :type length: int
    :param frequency: The frequency of the tone (Hz).
    :type frequency: float
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording containing the complex tone.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")

    duration = length / sample_rate
    timeline = np.linspace(0, duration, length, endpoint=False)

    # Random log-scale amplitude spanning eight decades.
    power_factor = np.random.uniform(-8, 0)
    tone = (10**power_factor) * np.exp(1j * 2 * np.pi * frequency * timeline)

    metadata = dict(
        signal="complex_sine",
        source="synth",
        sample_rate=sample_rate,
        length=length,
        signal_frequency=frequency,
        power_factor=power_factor,
    )
    return Recording(data=tone, metadata=metadata)
def birdie(sample_rate, length, frequency):
    """
    Generate a complex exponential tone used as a "birdie" in demos.

    The amplitude is 10**p where p is drawn uniformly from [-2.5, -0.5].

    :param sample_rate: The number of samples per second (Hz).
    :type sample_rate: int
    :param length: Number of samples in the recording.
    :type length: int
    :param frequency: The frequency of the tone (Hz).
    :type frequency: float
    :raises ValueError: If sample_rate is less than 1.
    :return: A Recording containing the complex tone.
    :rtype: Recording
    """
    if sample_rate < 1:
        raise ValueError("sample_rate must be > 1")

    duration = length / sample_rate
    timeline = np.linspace(0, duration, length, endpoint=False)

    # Narrower random power range than complex_sine, suited to demo birdies.
    power_factor = np.random.uniform(-2.5, -0.5)
    tone = (10**power_factor) * np.exp(1j * 2 * np.pi * frequency * timeline)

    metadata = dict(
        signal="complex_sine",
        source="synth",
        sample_rate=sample_rate,
        length=length,
        signal_frequency=frequency,
        power_factor=power_factor,
    )
    return Recording(data=tone, metadata=metadata)

View File

@ -0,0 +1,63 @@
# RIA Block Signal Generator
Welcome to the RIA block generator! These modular signal processing blocks can be used together to create synthetic radio signals, and it is easy to add new blocks.
These instructions apply to using the block system within python, and not to the front end GUI.
# Overview
A block can be a SourceBlock or a ProcessBlock. Either of these can also be a RecordableBlock, or not.
SourceBlocks produce samples, and have no input.
ProcessBlocks process samples. They also provide a .process() method that can be used to directly operate on samples without using the block system.
RecordableBlocks provide a .record() method to create a recording. Some blocks, such as the RandomBinarySource, produce non-IQ sample formats such as bits, which is why they are not recordable.
Blocks are connected in a tree structure terminating in a final RecordableBlock. Blocks may have multiple inputs but can only have one output, and this output cannot be connected to the inputs of more than one block.
# Getting Started
Let's build a block flow tree that creates a QPSK signal, adds an LFM jamming signal, and adds some noise.
First, imports:
```
from ria_toolkit_oss.signal.block_generator import RandomBinarySource, Mapper, Upsampling, RaisedCosineFilter, FrequencyShift, LFMChirpSource, Add, AWGNSource
sample_rate = 1000000
```
Create the random binary source block:
```
source = RandomBinarySource()
```
Create a constellation mapper block to convert bits to QPSK symbols, connecting its input to the source block.
```
mapper = Mapper(input=[source], constellation_type="PSK", num_bits_per_symbol=2)
```
Add an upsampling block and a raised cosine filter for pulse shaping:
```
upsampler = Upsampling(input = [mapper], factor = 4)
filter = RaisedCosineFilter(input=[upsampler], span_in_symbols=100, upsampling_factor=4, beta=0.1)
```
Create another branch of the block tree for the LFM jamming source and frequency shifter:
```
jammer=LFMChirpSource(sample_rate=sample_rate, bandwidth=sample_rate/2, chirp_period=0.01, chirp_type='up')
f_shift = FrequencyShift(input = [jammer], shift_frequency=100000, sampling_rate=sample_rate)
```
Sum the two signals with an Add block:
```
adder = Add(input=[filter, f_shift])
```
Add another branch to create noise:
```
awgn_source = AWGNSource(variance = 0.05)
adder2 = Add(input = [adder, awgn_source])
```
Finally create a recording at the terminal block in the tree:
```
recording = adder2.record(100000)
recording.view()
recording.to_sigmf()
```

View File

@ -0,0 +1,88 @@
"""
RIA Block-Based Signal Generator Module
This module provides a flexible framework for simulating communication systems using configurable blocks. It includes:
- Various block types: filters, mappers, modulators, demodulators, and channels
- Easy-to-use classes for creating custom signal processing chains
- Pre-configured generators for common use cases
Key features:
- Modular design for building complex systems
- Customizable block parameters
- Ready-to-use generators for quick prototyping
Usage:
1. Import desired blocks
2. Configure block parameters
3. Connect blocks to create a processing chain
4. Run simulations with custom or provided input signals
For detailed examples and API reference, see the documentation.
"""
from .basic import Add, FrequencyShift, MultiplyConstant, PhaseShift
from .generators import (
PAMGenerator,
PSKGenerator,
QAMGenerator,
SignalGenerator,
)
from .mapping import Mapper, SymbolDemapper
from .process_block import ProcessBlock
from .pulse_shaping import (
GaussianFilter,
RaisedCosineFilter,
RectFilter,
RootRaisedCosineFilter,
SincFilter,
Upsampling,
)
from .recordable_block import RecordableBlock
from .siso_channel import AWGNChannel, FlatRayleigh
from .source import (
AWGNSource,
BinarySource,
ConstantSource,
LFMChirpSource,
RecordingSource,
SawtoothSource,
SineSource,
SquareSource,
)
from .source_block import SourceBlock
from .symbol_modulation import GMSKModulator, OOKModulator, OQPSKModulator
__all__ = [
"Add",
"FrequencyShift",
"MultiplyConstant",
"PhaseShift",
"PAMGenerator",
"PSKGenerator",
"QAMGenerator",
"SignalGenerator",
"Mapper",
"SymbolDemapper",
"GMSKModulator",
"OOKModulator",
"OQPSKModulator",
"RaisedCosineFilter",
"RootRaisedCosineFilter",
"SincFilter",
"RectFilter",
"GaussianFilter",
"Upsampling",
"AWGNChannel",
"FlatRayleigh",
"AWGNSource",
"ConstantSource",
"LFMChirpSource",
"BinarySource",
"RecordingSource",
"SawtoothSource",
"SineSource",
"SquareSource",
]

View File

@ -0,0 +1,6 @@
from .add import Add
from .frequency_shift import FrequencyShift
from .multiply_constant import MultiplyConstant
from .phase_shift import PhaseShift
__all__ = ["Add", "FrequencyShift", "MultiplyConstant", "PhaseShift"]

View File

@ -0,0 +1,69 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class Add(RecordableBlock, ProcessBlock):
    """
    Add Block

    Sums the input from two blocks.

    Input type: [BASEBAND_SIGNAL, BASEBAND_SIGNAL]
    Output type: BASEBAND_SIGNAL
    """

    def __init__(self):
        super().__init__()

    def connect_input(self, input):
        """Validate that every input block produces the same datatype, then connect.

        :param input: The blocks to connect as inputs.
        :raises ValueError: If the input blocks do not all share one output datatype.
        """
        datatype = input[0].output_type
        for input_block in input:
            if input_block.output_type != datatype:
                # Describe each block by class name and declared output type so
                # the mismatch is easy to locate. Previously this left a stray
                # debug print and called the property's result
                # (block.output_type()), which is not callable.
                described = ", ".join(
                    f"{block.__class__.__name__}({block.output_type})" for block in input
                )
                raise ValueError(
                    f"'Add' block requires inputs to have the same datatype but got [{described}]"
                )
        return super().connect_input(input)

    def _get_input_samples(self, block, num_samples):
        """
        Request num_samples from a block and validate that exactly that many
        samples were returned.

        :raises ValueError: If the block returned a different number of samples.
        """
        samples = block.get_samples(num_samples)
        if len(samples) != num_samples:
            raise ValueError(
                f"Block {self.__class__.__name__} requested {num_samples} "
                f"from block {block.__class__.__name__} but got {len(samples)}."
            )
        return samples

    @property
    def input_type(self):
        # Two baseband-signal inputs.
        return [DataType.BASEBAND_SIGNAL, DataType.BASEBAND_SIGNAL]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, samples: list[np.array]):
        """
        Add two signals together.

        :param samples: A list containing two sample arrays of the same length.
        :type samples: list of np.array
        :returns: An array of output samples.
        :rtype: np.array
        :raises ValueError: If samples is not a list of two equal-length arrays.
        """
        if len(samples) != 2:
            raise ValueError("Input must be a list of two input arrays.")
        if len(samples[0]) != len(samples[1]):
            raise ValueError(f"Input arrays must be equal length but were {len(samples[0])} and {len(samples[1])}")
        return samples[0] + samples[1]

View File

@ -0,0 +1,56 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class FrequencyShift(ProcessBlock, RecordableBlock):
    """
    Frequency Shift Block

    Applies a frequency shift to the input signal by multiplying it with a
    complex exponential carrier.

    Input type: BASEBAND_SIGNAL
    Output type: BASEBAND_SIGNAL

    :param shift_frequency: The frequency to shift the signal by (Hz). Default is 100,000.
    :type shift_frequency: float, optional
    :param sampling_rate: The sample rate used in frequency calculations (Hz).
        Default is 1,000,000.
    :type sampling_rate: float, optional

    WARNING: This block does not include any anti-aliasing filters.
    It is the responsibility of the user to ensure proper
    filtering is performed before/after this block to prevent aliasing.
    """

    def __init__(self, shift_frequency: Optional[float] = 100000, sampling_rate: Optional[float] = 1000000):
        self.shift_frequency = shift_frequency
        self.sampling_rate = sampling_rate
        super().__init__()

    @property
    def input_type(self) -> list[DataType]:
        # A single baseband-signal input.
        return [DataType.BASEBAND_SIGNAL]

    @property
    def output_type(self) -> DataType:
        return DataType.BASEBAND_SIGNAL

    def __call__(self, samples: list[np.array]):
        """
        Frequency shift input samples by the previously initialized shift frequency.

        :param samples: A list containing a single array of complex samples.
        :type samples: list of np.array
        :returns: Processed samples.
        :rtype: np.array
        """
        signal = samples[0]
        num_samples = len(signal)
        t = np.arange(num_samples) / self.sampling_rate
        # Multiplying by exp(j*2*pi*f*t) translates the spectrum by
        # shift_frequency.
        carrier = np.exp(1j * 2 * np.pi * self.shift_frequency * t)
        return signal * carrier

View File

@ -0,0 +1,41 @@
from typing import Optional
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class MultiplyConstant(ProcessBlock, RecordableBlock):
    """
    MultiplyConstant Block

    Multiply the input samples by a constant.

    Input Type: BASEBAND_SIGNAL
    Output Type: BASEBAND_SIGNAL

    :param multiplier: The value to multiply the samples by. Default is 0.5.
    :type multiplier: float, optional
    """

    def __init__(self, multiplier: Optional[float] = 0.5):
        self.multiplier = multiplier
        # Run base-class initialization, matching the sibling blocks
        # (FrequencyShift, PhaseShift); it was previously omitted here.
        super().__init__()

    @property
    def input_type(self):
        # A single baseband-signal input.
        return [DataType.BASEBAND_SIGNAL]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, samples):
        """
        Multiply an array of complex samples by the previously initialised value.

        :param samples: A list containing a single array of complex samples.
        :type samples: list of np.array
        :returns: Processed samples.
        :rtype: np.array
        """
        return samples[0] * self.multiplier

View File

@ -0,0 +1,40 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class PhaseShift(ProcessBlock, RecordableBlock):
    """
    PhaseShift Block

    Apply a complex phase shift to the input signal.

    :param phase: The complex phase shift in radians. Default is 0.
    :type phase: float, optional
    """

    def __init__(self, phase: Optional[float] = 0):
        self.phase = phase
        super().__init__()

    @property
    def input_type(self):
        # A single baseband-signal input.
        return [DataType.BASEBAND_SIGNAL]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, samples):
        """
        Rotate an array of complex samples by the configured phase.

        :param samples: A list containing a single array of complex samples.
        :type samples: list of np.array
        :returns: The phase-shifted samples.
        :rtype: np.array
        """
        rotator = np.exp(1j * self.phase)
        return rotator * samples[0]

View File

@ -0,0 +1,122 @@
import json
from abc import ABC, abstractmethod
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class Block(ABC):
    """
    Abstract base class for signal processing blocks.

    This class defines the interface for all signal processing blocks:
    input_type/output_type declare what a block consumes and produces,
    get_samples pulls samples through the block tree, and __call__ processes
    sample arrays directly. It also provides metadata collection shared by
    all concrete blocks.
    """

    @property
    @abstractmethod
    def input_type(self) -> DataType:
        """
        Get the input data type for the block.

        :return: The input data type.
        :rtype: DataType
        """
        pass

    @property
    @abstractmethod
    def output_type(self) -> DataType:
        """
        Get the output data type for the block.

        :return: The output data type.
        :rtype: DataType
        """
        pass

    @abstractmethod
    def get_samples(self, num_samples) -> np.ndarray:
        """
        Produce num_samples output samples, pulling from any upstream blocks.

        :param num_samples: The number of samples to produce.
        :return: The processed output data.
        :rtype: numpy array
        """
        pass

    def _get_metadata(self):
        # Collect this block's JSON-serializable attributes under namespaced
        # keys ("BlockGenerator:<ClassName>:<attr>"), then merge in metadata
        # from every upstream block, renaming keys on merge.
        # NOTE(review): assumes self.input holds the connected upstream
        # blocks -- confirm it is set by the connection machinery.
        metadata = {}
        for key, value in vars(self).items():
            try:
                # Try to serialize the value to check if it's JSON serializable
                json.dumps(value)
                metadata[f"BlockGenerator:{self.__class__.__name__}:{key}"] = value
            except (TypeError, ValueError):
                # If the value is not JSON serializable, skip it
                continue
        for block in self.input:
            metadata = self._combine_dicts_and_handle_double_keys(block._get_metadata(), metadata)
        return metadata

    # TODO improve this
    def _combine_dicts_and_handle_double_keys(self, source_dict, other_dict):
        """
        Merge source_dict into other_dict, renaming keys to avoid collisions.

        Each key from source_dict is inserted into other_dict under a name of
        the form "<key>(n)", where n is incremented until the name is unique
        in other_dict. NOTE(review): as written, keys appear to be renamed
        with a "(n)" suffix even when there is no collision -- confirm this
        is intended.

        :param source_dict: The dictionary whose entries are merged in.
        :param other_dict: The dictionary receiving the (possibly renamed) entries.
        :return: other_dict, updated with the merged entries.
        """
        for key, value in source_dict.items():
            # Find the last colon in the key
            last_colon_index = key.rfind(":")
            # Ensure there's at least one colon in the key
            if last_colon_index == -1:
                # If no colon, just append "(1)"
                new_key = f"{key}(1)"
            else:
                # Extract the prefix and the part after the last colon
                prefix = key[:last_colon_index]
                suffix = key[last_colon_index + 1 :]
                # Check if the suffix has a number inside parentheses
                if suffix.startswith("(") and suffix.endswith(")") and suffix[1:-1].isdigit():
                    # Extract the number inside the parentheses and increment it
                    number = int(suffix[1:-1]) + 1
                    new_key = f"{prefix}({number})"
                else:
                    # No number at the end, so just append "(1)"
                    new_key = f"{key}(1)"
            # Ensure the new key is unique in both dictionaries
            while new_key in other_dict:
                # Find the last parentheses to extract the current number
                last_paren_index = new_key.rfind(")")
                prefix = new_key[:last_paren_index]
                suffix = new_key[last_paren_index + 1 :]
                # Extract the number in parentheses and increment it
                if suffix.startswith("(") and suffix.endswith(")") and suffix[1:-1].isdigit():
                    number = int(suffix[1:-1]) + 1
                else:
                    number = 1  # Default to 1 if no number in parentheses
                # Create the new key with the incremented number
                new_key = f"{prefix}({number})"
            # Update the other dictionary with the new key
            other_dict[new_key] = value
        return other_dict

    @abstractmethod
    def __call__(self, *args, **kwargs) -> np.ndarray:
        """
        Process the input data and produce output.

        :param args: Positional arguments for the processing method.
        :param kwargs: Keyword arguments for the processing method.
        :return: The processed output data.
        :rtype: numpy array
        """
        pass

View File

@ -0,0 +1,112 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.demodulator import (
Demodulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class CoherentCorrelator(Demodulator):
    """
    A correlator for coherent detection that performs frequency downconversion via correlation.

    This class implements a coherent correlator by multiplying the received passband signal
    with a reference carrier and integrating (summing) over one symbol period. The reference
    carrier can be generated in one of two ways:

    - If 'per_symbol' is True, the carrier reference is generated for each symbol separately
      (i.e. a time vector that resets to zero for every symbol).
    - If 'per_symbol' is False, a continuous time vector is used over the entire signal.

    :param carrier_frequency: The carrier frequency (Hz) used for demodulation.
    :param symbol_duration: The duration (seconds) of one symbol period.
    :param sampling_rate: The sampling rate (Hz) of the received signal.
    :param per_symbol: If True, uses a per-symbol time vector; if False, uses a continuous time vector.
    """

    def __init__(
        self,
        carrier_frequency: float,
        symbol_duration: float,
        sampling_rate: float,
        per_symbol: bool = True,
    ):
        self.carrier_frequency = carrier_frequency
        self.symbol_duration = symbol_duration
        self.sampling_rate = sampling_rate
        # Number of samples making up one symbol period.
        self.samples_per_symbol = int(self.symbol_duration * self.sampling_rate)
        self.per_symbol = per_symbol

    @property
    def input_type(self) -> DataType:
        """The correlator expects a passband signal as input."""
        return DataType.PASSBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """Declared output data type (the values are per-symbol decision statistics)."""
        return DataType.BITS

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Correlate the input passband signal with a reference carrier to produce decision statistics.

        The input signal is assumed to be a 2D numpy array of shape (batch_size, total_samples),
        where total_samples is an integer multiple of the number of samples per symbol.

        Depending on the 'per_symbol' flag, the reference carrier is generated as:

        - If True: a per-symbol time vector (from 0 to symbol_duration) is used.
        - If False: a continuous time vector for the entire signal is used.

        :param signal: The input passband signal (shape: (batch_size, total_samples)).
        :return: A 2D numpy array of decision statistics with shape (batch_size, num_symbols).
        :raises ValueError: If the total number of samples is not an integer multiple of samples_per_symbol.
        """
        batch_size, total_samples = signal.shape
        samples_per_symbol = self.samples_per_symbol
        if total_samples % samples_per_symbol != 0:
            raise ValueError(
                "The total number of samples in the signal must be an integer multiple of the samples per symbol."
            )
        num_symbols = total_samples // samples_per_symbol
        # Reshape the signal into symbols: shape (batch_size, num_symbols, samples_per_symbol)
        symbols = signal.reshape(batch_size, num_symbols, samples_per_symbol)
        if self.per_symbol:
            # Generate per-symbol time vector (from 0 to symbol_duration)
            t_symbol = np.arange(samples_per_symbol) / self.sampling_rate
            # Complex input correlates against a complex exponential; real input against a cosine.
            if np.iscomplexobj(signal):
                reference = np.exp(-1j * 2 * np.pi * self.carrier_frequency * t_symbol)
            else:
                reference = np.cos(2 * np.pi * self.carrier_frequency * t_symbol)
            # Multiply each symbol with the reference (broadcasted) to obtain the product.
            product = symbols * reference[None, None, :]
        else:
            # Use a continuous time vector for the entire signal.
            t_full = np.arange(total_samples) / self.sampling_rate
            if np.iscomplexobj(signal):
                reference_full = np.exp(-1j * 2 * np.pi * self.carrier_frequency * t_full)
            else:
                reference_full = np.cos(2 * np.pi * self.carrier_frequency * t_full)
            reference_full = reference_full.reshape(1, num_symbols, samples_per_symbol)
            product = symbols * reference_full
        # Integrate (sum) over each symbol period to form the decision statistic.
        decision_stats = np.sum(product, axis=2)
        return decision_stats

    def __str__(self) -> str:
        """Return a string representation of the CoherentCorrelator."""
        # Fixed: previous version omitted per_symbol and never closed the parenthesis.
        return (
            f"CoherentCorrelator(carrier_frequency={self.carrier_frequency}, "
            f"symbol_duration={self.symbol_duration}, sampling_rate={self.sampling_rate}, "
            f"per_symbol={self.per_symbol})"
        )

View File

@ -0,0 +1,218 @@
import itertools
import warnings
from typing import List, Tuple
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.demodulator import (
Demodulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.mapping.symbol_demapper import (
SymbolDemapper,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.gaussian_filter import (
GaussianFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.rect_filter import RectFilter
class CPFSKDemodulator(Demodulator):
    """
    M-ary CPFSK demodulator.

    Two operating modes
    -------------------
    symbol_by_symbol = True  -> per-symbol slicing of the matched-filter output
    symbol_by_symbol = False -> runs an L-memory Viterbi detector
                                (L taken from the pulse filter's span in symbols)

    The Viterbi detector models the residual ISI introduced by the Gaussian/
    rectangular pulse as a *linear* partial-response channel whose taps are
    extracted automatically from the matched-filter output of an impulse.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
        gaussian: bool = False,
        bt: float = 0.3,
        symbol_by_symbol: bool = False,
    ):
        super().__init__()
        self.M_bits = num_bits_per_symbol
        self.M = 1 << num_bits_per_symbol  # alphabet size: 2, 4, 8, ...
        self.freq_sep = frequency_spacing
        self.Ts = symbol_duration
        self.Fs = sampling_frequency
        self.sps = int(self.Fs * self.Ts)  # samples / symbol
        if self.sps % 2 == 0:  # keep it odd (filter alignment)
            self.sps += 1
        self.symbol_by_symbol = symbol_by_symbol
        # ------------------------------------------------------------------ #
        # Front-end filter (same as transmitter) and matched-filter partner  #
        # ------------------------------------------------------------------ #
        if gaussian:
            self.filter = GaussianFilter(3, upsampling_factor=self.sps, bt=bt, normalize=False)
        else:
            self.filter = RectFilter(1, upsampling_factor=self.sps)
        # Viterbi memory L = pulse span in symbols.
        self.va_mem = self.filter.span_in_symbols
        # Mapper / Demapper (PAM levels are -(M-1), ..., +(M-1))
        self.mapper = Mapper("pam", num_bits_per_symbol, normalize=False)
        self.const = self.mapper.get_constellation()  # (M,)
        self.bit_map = self.mapper.get_bit_mapping()  # dict: symbol -> bits
        self.demapper = SymbolDemapper(self.const, self.bit_map)
        # ------------------------------------------------------------------ #
        # Pre-compute symbol-rate channel taps for the Viterbi branch        #
        # ------------------------------------------------------------------ #
        self.taps = self._symbol_rate_taps(self.va_mem)  # (L,)
        # NOTE: taps[0] is always 1 because of matched filtering normalisation
        # Build state mapping once (for VA)
        self._states, self._prev_lookup = self._enumerate_states()

    @property
    def input_type(self) -> DataType:
        """This demodulator consumes a complex baseband signal."""
        return DataType.BASEBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """This demodulator produces a hard-decision bit stream."""
        return DataType.BITS

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Demodulate a batch of CPFSK bursts.

        :param signal: Complex baseband signal, shape (batch, total_samples).
        :return: Hard bits, shape (batch, n_symbols * num_bits_per_symbol).
        """
        batches, total = signal.shape
        n_sym = total // self.sps
        if total % self.sps:
            signal = signal[:, : n_sym * self.sps]
            warnings.warn("Input truncated to an integer number of symbols.")
        # -------------------------------------------------------------- #
        # Phase -> frequency -> matched filter                           #
        # -------------------------------------------------------------- #
        # phase = np.angle(signal)
        # phase_unwrap = np.unwrap(phase, axis=1)
        # diff_phase = np.diff(phase_unwrap, axis=1)
        # Phase increment between consecutive samples; avoids explicit unwrap.
        dtheta = np.angle(signal[:, 1:] * np.conj(signal[:, :-1]))  # length N-1
        freq_est = dtheta * self.Fs / (2 * np.pi)  # Hz
        # Normalise so nominal symbol levels land on the PAM constellation.
        u_est = freq_est / (self.freq_sep / 2)
        # freq_est = diff_phase * self.Fs / (2 * np.pi)  # Hz
        # u_est = freq_est / (self.freq_sep / 2)  # +-1, +-3, +-5, ...
        u_matched = self.filter.apply_matched_filter(u_est)
        # Skip the filter's transient, then sample once per symbol.
        start = self.filter.span_in_symbols * self.sps
        soft = u_matched[:, start :: self.sps][:, :n_sym]  # (B, K)
        if self.symbol_by_symbol or self.va_mem == 1:
            # ---------- legacy: slice & direct PAM demap --------------
            return self._pam_slice_demod(soft)
        # ---------- sequence detector on each burst -------------------
        # Viterbi: iterate over bursts
        out = np.empty((batches, n_sym * self.M_bits), dtype=np.uint8)
        for b in range(batches):
            out[b] = self._viterbi_one_burst(soft[b])
        return out

    # ---------------------------------------------------------------------- #
    # Helpers                                                                #
    # ---------------------------------------------------------------------- #
    def _pam_slice_demod(self, soft_symbols: np.ndarray) -> np.ndarray:
        """Symbol-by-symbol flow: demap matched-filter samples directly to bits."""
        return self.demapper(soft_symbols.astype(np.complex128))

    # ---- 1. obtain channel taps at symbol rate --------------------------- #
    def _symbol_rate_taps(self, L: int) -> np.ndarray:
        """
        Send a delta through the matched filter and sample once per symbol.
        Gives the *discrete partial-response channel* h[0..L-1].
        """
        span = self.filter.span_in_symbols
        N = (span + 1) * self.sps + 1
        delta = np.zeros(N)
        delta[span * self.sps] = 1.0  # impulse at t=0
        mf_out = self.filter.apply_matched_filter(delta[None, :])[0]
        taps = mf_out[span * self.sps : span * self.sps + L * self.sps : self.sps]
        taps /= taps[0]  # normalise so h[0]=1
        return taps  # shape (L,)

    # ---- 2. build state book for Viterbi --------------------------------- #
    def _enumerate_states(self) -> Tuple[List[Tuple[int, ...]], dict]:
        """
        Returns
        -------
        states : list of tuples of symbol indices (len = M^{L-1})
            State #i is a tuple of the (L-1) previous symbol *indices*.
        prev_lookup : dict[state_index] -> list[(prev_state_index, sym_index)]
            For fast VA branch generation.
        """
        if self.va_mem == 1:
            # Memoryless channel: single state, all M symbols reachable.
            return [()], {0: [(0, m) for m in range(self.M)]}
        states = list(itertools.product(range(self.M), repeat=self.va_mem - 1))  # (L-1)-tuple
        to_idx = {s: i for i, s in enumerate(states)}
        prev_lookup = {i: [] for i in range(len(states))}
        for i, s in enumerate(states):
            for m in range(self.M):
                new_s = (m,) + s[:-1]  # push current symbol in, drop the oldest
                prev_lookup[to_idx[new_s]].append((i, m))
        return states, prev_lookup

    # ---- 3. Viterbi over real partial-response channel ------------------- #
    def _viterbi_one_burst(self, soft: np.ndarray) -> np.ndarray:
        """
        soft : shape (K,) real matched-filter samples for one burst
        Returns a hard-bit array of length K * M_bits.
        """
        K = len(soft)
        L = self.va_mem
        h = self.taps  # (L,)
        n_states = self.M ** (L - 1) if L > 1 else 1
        big = 1e12
        metric = np.zeros(n_states) + big
        metric[0] = 0.0  # start from the all-zeros state
        # For traceback
        surv_state = np.zeros((K, n_states), dtype=np.int32)
        surv_symbol = np.zeros((K, n_states), dtype=np.int32)
        const = self.const  # symbol amplitudes
        for k in range(K):
            yk = soft[k]
            mnew = np.zeros_like(metric) + big
            for s_cur in range(n_states):
                for s_prev, sym_idx in self._prev_lookup[s_cur]:
                    # predicted sample = h0 * a_k + sum_{i=1}^{L-1} h_i * a_{k-i}
                    pred = h[0] * const[sym_idx]
                    if L > 1:
                        prev_syms = self._states[s_prev]
                        for d, a_prev_idx in enumerate(prev_syms, 1):
                            pred += h[d] * const[a_prev_idx]
                    # Squared-error branch metric (real-valued channel).
                    br_metric = (yk - pred) ** 2
                    cost = metric[s_prev] + br_metric
                    if cost < mnew[s_cur]:
                        mnew[s_cur] = cost
                        surv_state[k, s_cur] = s_prev
                        surv_symbol[k, s_cur] = sym_idx
            metric = mnew
        # ---------- traceback ----------
        s_hat = int(np.argmin(metric))
        sym_hat = np.zeros(K, dtype=int)
        for k in range(K - 1, -1, -1):
            sym_hat[k] = surv_symbol[k, s_hat]
            s_hat = surv_state[k, s_hat]
        # Map decided symbols to bits with the existing SymbolDemapper.
        sym_amp = np.atleast_2d(const[sym_hat].astype(np.complex128))  # make it complex
        return self.demapper(sym_amp)

View File

@ -0,0 +1,140 @@
import warnings
import numpy as np
from scipy.signal import hilbert
from ria_toolkit_oss.signal.block_generator.continuous_modulation.demodulator import (
Demodulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.mapping.symbol_demapper import (
SymbolDemapper, # or implement your own
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.gaussian_filter import (
GaussianFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.rect_filter import RectFilter
class CPFSKDemodulator(Demodulator):
    """
    A basic CPFSK demodulator that attempts to invert the CPFSKModulator logic:

    1) Convert real passband to complex baseband (Hilbert transform + mix down).
    2) Unwrap phase and differentiate to estimate instantaneous frequency offset.
    3) Match-filter that offset using the same shape (Rect or Gaussian).
    4) Sample once per symbol and map back to bits with an inverse of the PAM mapper.

    Note: For strongly filtered CPFSK/GFSK, a sequence detector (like Viterbi) is often
    required for best performance. This simple approach treats each symbol independently.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        center_frequency: float,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
        gaussian: bool = False,
    ):
        self.num_bits_per_symbol = num_bits_per_symbol
        self.center_frequency = center_frequency
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency
        # Integer number of samples per symbol period.
        self.samples_per_symbol = int(round(self.symbol_duration * self.sampling_frequency))
        # Use the same filter type/params as the modulator for matched filtering in freq-domain
        if gaussian:
            self.filter = GaussianFilter(1, upsampling_factor=self.samples_per_symbol, bt=0.3, normalize=False)
        else:
            self.filter = RectFilter(1, upsampling_factor=self.samples_per_symbol, normalize=False)
        self.mapper = Mapper("pam", num_bits_per_symbol, normalize=False)
        constellation = self.mapper.get_constellation()
        bit_mapping = self.mapper.get_bit_mapping()
        self.demapper = SymbolDemapper(constellation, bit_mapping)

    @property
    def input_type(self) -> DataType:
        """This demodulator consumes a real passband signal."""
        return DataType.PASSBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """This demodulator produces a bit stream."""
        return DataType.BITS

    def mixed_difference_derivative(self, x):
        """
        Computes the numerical derivative of multiple 1D signals x,
        where x has shape (num_signals, num_samples).

        The sampling period is computed as 1 / self.sampling_frequency.
        Derivative is returned in the same shape (num_signals, num_samples),
        using:

        - Forward difference at the first sample
        - Central difference for interior samples
        - Backward difference at the last sample

        NOTE(review): this helper is not called by __call__ in this file —
        confirm whether external callers rely on it before removing.
        """
        dt = 1.0 / self.sampling_frequency
        # Expect x to have shape (num_signals, num_samples)
        num_signals, num_samples = x.shape
        # If not enough samples to take a difference, just return zeros
        if num_samples < 2:
            return np.zeros_like(x)
        # Allocate output array
        dx_dt = np.zeros_like(x)
        # Forward difference at n=0
        # shape: (num_signals,)
        dx_dt[:, 0] = (x[:, 1] - x[:, 0]) / dt
        # Central difference for n in [1 ... num_samples-2]
        # shape: (num_signals, num_samples-2)
        dx_dt[:, 1:-1] = (x[:, 2:] - x[:, :-2]) / (2.0 * dt)
        # Backward difference at n = num_samples - 1
        dx_dt[:, -1] = (x[:, -1] - x[:, -2]) / dt
        return dx_dt

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        :param signal: Real passband CPFSK waveforms, shape (batch_size, total_samples).
        :return: Recovered bits, shape (batch_size, num_bits).
        """
        batch_size, total_samples = signal.shape
        # Whole symbols available (remainder is truncated below).
        num_symbols = total_samples // self.samples_per_symbol
        # Ensure total_samples is multiple of samples_per_symbol
        if total_samples % self.samples_per_symbol != 0:
            # Just truncate if needed
            excess = total_samples % self.samples_per_symbol
            signal = signal[:, : total_samples - excess]
            total_samples = signal.shape[1]
            warnings.warn("Truncated input signal to be multiple of samples_per_symbol.")
        # 1) Make an analytic signal along axis=1 (time axis)
        analytic = hilbert(signal, axis=1)
        # 2) Instantaneous phase in [-pi, +pi]
        phase = np.angle(analytic)  # shape => (batch_size, total_samples)
        # 3) Unwrap in time to remove 2*pi jumps
        phase_unwrapped = np.unwrap(phase, axis=1)
        # 4) Numerical derivative of phase -> ~ phi'(t)
        # Because discrete difference is ~ [phi(n+1)-phi(n)] * fs
        diff_phase = np.diff(phase_unwrapped, axis=1)  # shape => (batch_size, total_samples-1)
        freq_est = (diff_phase * self.sampling_frequency) / (2 * np.pi)
        # Remove the carrier and normalise onto the PAM constellation levels.
        u_est = (freq_est - self.center_frequency) / (self.frequency_spacing / 2)
        # Matched-filter and normalise by the filter energy.
        u_matched = self.filter.apply_matched_filter(u_est) / self.filter.energy
        # Sample once per symbol, skipping the first symbol of filter transient.
        u_matched_ds = u_matched[
            :, self.samples_per_symbol : (num_symbols + 1) * self.samples_per_symbol : self.samples_per_symbol
        ]
        bits = self.demapper(u_matched_ds)
        return bits

View File

@ -0,0 +1,104 @@
import warnings
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.modulator import (
Modulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.gaussian_filter import (
GaussianFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.rect_filter import RectFilter
class CPFSKModulator(Modulator):
    """
    Continuous-phase FSK modulator producing a complex baseband waveform.

    Bits are mapped to real PAM levels, upsampled, pulse-shaped (rectangular
    or Gaussian), interpreted as an instantaneous-frequency trajectory around
    0 Hz, and integrated into a phase that drives a complex exponential.

    :param num_bits_per_symbol: Number of bits carried by each symbol.
    :param frequency_spacing: Frequency spacing (Hz) between adjacent symbol tones.
    :param symbol_duration: Duration (seconds) of one symbol period.
    :param sampling_frequency: Output sampling rate (Hz).
    :param gaussian: If True, use a Gaussian pulse-shaping filter (GFSK-style);
        otherwise a rectangular filter.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
        gaussian: Optional[bool] = False,
    ):
        # Orthogonality requirement for coherent FSK.
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal coherent FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Highest candidate tone when symbols sit symmetrically about 0 Hz.
        largest_carrier = (2**num_bits_per_symbol - 1) / 2 * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency
        self.samples_per_symbol = int(self.sampling_frequency * self.symbol_duration)
        # Force an odd sample count per symbol (pulse-filter alignment).
        if self.samples_per_symbol % 2 == 0:
            self.samples_per_symbol += 1
        self.pam_mapper = Mapper("pam", num_bits_per_symbol, normalize=False)
        self.us = Upsampling(self.samples_per_symbol)
        if gaussian:
            self.filter = GaussianFilter(3, upsampling_factor=self.samples_per_symbol, bt=0.3, normalize=False)
        else:
            self.filter = RectFilter(1, upsampling_factor=self.samples_per_symbol, normalize=False)

    @property
    def input_type(self) -> DataType:
        """The modulator consumes a bit stream."""
        return DataType.BITS

    @property
    def output_type(self) -> DataType:
        """Declared output data type (PASSBAND_SIGNAL)."""
        return DataType.PASSBAND_SIGNAL

    def get_samples(self, num_samples):
        """Not supported for this modulator."""
        raise NotImplementedError

    def __call__(self, bits: np.ndarray) -> np.ndarray:
        """
        Modulate a batch of bit vectors into complex CPFSK waveforms.

        :param bits: Bit array of shape (batch_size, num_bits); num_bits must be
            a multiple of num_bits_per_symbol.
        :return: Complex waveform of shape (batch_size, num_symbols * samples_per_symbol).
        :raises ValueError: If num_bits is not a multiple of num_bits_per_symbol.
        """
        _, num_bits = bits.shape
        if num_bits % self.num_bits_per_symbol != 0:
            raise ValueError(
                f"The number of bits per row ({num_bits}) must be a multiple of "
                f"num_bits_per_symbol ({self.num_bits_per_symbol})."
            )
        # Bits -> real PAM levels, then to a sample-rate frequency trajectory.
        pam_levels = np.real(self.pam_mapper(bits))
        shaped = self.filter(self.us(pam_levels))
        # Shaped samples act as a frequency offset of +/- frequency_spacing/2
        # per unit PAM level.
        freq_dev = self.frequency_spacing / 2.0
        inst_freq = freq_dev * shaped
        # Integrate instantaneous frequency into phase along the time axis.
        phase = np.cumsum(2 * np.pi * inst_freq / self.sampling_frequency, axis=1)
        # Complex exponential waveform, trimmed to a whole number of symbols.
        n_out = num_bits // self.num_bits_per_symbol * self.samples_per_symbol
        return np.exp(1j * phase)[:, :n_out]

View File

@ -0,0 +1,108 @@
import warnings
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.modulator import (
Modulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.gaussian_filter import (
GaussianFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.rect_filter import RectFilter
class CPFSKModulator(Modulator):
    """
    Continuous-phase FSK modulator producing a real passband waveform.

    Bits are mapped to real PAM levels, upsampled, pulse-shaped (rectangular
    or Gaussian), interpreted as an instantaneous frequency around
    center_frequency, and integrated into a phase that drives a cosine.

    :param num_bits_per_symbol: Number of bits carried by each symbol.
    :param center_frequency: Carrier center frequency (Hz).
    :param frequency_spacing: Frequency spacing (Hz) between adjacent symbol tones.
    :param symbol_duration: Duration (seconds) of one symbol period.
    :param sampling_frequency: Output sampling rate (Hz).
    :param gaussian: If True, use a Gaussian pulse-shaping filter; otherwise rectangular.
    :raises AssertionError: If frequency_spacing * symbol_duration < 0.5, or if the
        lowest candidate frequency would not be positive.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        center_frequency: float,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
        gaussian: Optional[bool] = False,
    ):
        # Assert that the frequency spacing and symbol duration are sufficient
        # to maintain orthogonality for coherent FSK.
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal coherent FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Ensure that the lowest frequency (when mapping symbols symmetrically about the center) is positive.
        assert center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing > 0, (
            f"With center_frequency={center_frequency} Hz, frequency_spacing={frequency_spacing} Hz, "
            f"and num_bits_per_symbol={num_bits_per_symbol}, the lowest frequency would be "
            f"{center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing} Hz, which must be positive."
        )
        # Calculate the largest possible carrier frequency from the candidate mapping.
        largest_carrier = center_frequency + ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.center_frequency = center_frequency
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency
        # Integer number of samples per symbol period.
        self.samples_per_symbol = int(self.sampling_frequency * self.symbol_duration)
        self.pam_mapper = Mapper("pam", num_bits_per_symbol, normalize=False)
        self.us = Upsampling(self.samples_per_symbol)
        if gaussian:
            self.filter = GaussianFilter(1, upsampling_factor=self.samples_per_symbol, bt=0.3, normalize=False)
        else:
            self.filter = RectFilter(1, upsampling_factor=self.samples_per_symbol, normalize=False)

    @property
    def input_type(self) -> DataType:
        """The modulator consumes a bit stream."""
        return DataType.BITS

    @property
    def output_type(self) -> DataType:
        """The modulator emits a real passband signal."""
        return DataType.PASSBAND_SIGNAL

    def get_samples(self, num_samples):
        """Not supported for this modulator."""
        raise NotImplementedError

    def __call__(self, bits: np.ndarray) -> np.ndarray:
        """
        Modulate a batch of bit vectors into real passband CPFSK waveforms.

        :param bits: Bit array of shape (batch_size, num_bits); num_bits must be
            a multiple of num_bits_per_symbol.
        :return: Real waveform of shape (batch_size, num_symbols * samples_per_symbol).
        :raises ValueError: If num_bits is not a multiple of num_bits_per_symbol.
        """
        batch_size, num_bits = bits.shape
        # Validate bit length
        if num_bits % self.num_bits_per_symbol != 0:
            raise ValueError(
                f"The number of bits per row ({num_bits}) must be a multiple of "
                f"num_bits_per_symbol ({self.num_bits_per_symbol})."
            )
        # 1) Map bits to symbols (e.g., PAM), shape -> (batch_size, num_symbols)
        symbols = np.real(self.pam_mapper(bits))
        # 2) Upsample each row by 'samples_per_symbol', shape -> (batch_size, num_symbols * samples_per_symbol)
        x_upsampled = self.us(symbols)
        # 3) Filter (Rect or Gaussian), shape still -> (batch_size, total_samples)
        x_shaped = self.filter(x_upsampled)
        # For CPFSK, interpret x_shaped as a frequency offset around center_frequency.
        # A common convention is to let freq_dev = frequency_spacing / 2 if you want ± frequency_spacing/2 offset,
        # but you can also set freq_dev = frequency_spacing if that suits your design.
        freq_dev = self.frequency_spacing / 2.0
        # Compute the instantaneous frequency for all samples and all batches
        freq_inst = self.center_frequency + freq_dev * x_shaped  # shape: (batch_size, total_samples)
        # Compute the phase increment per sample and perform a cumulative sum along axis=1 (time axis)
        phase = np.cumsum(2 * np.pi * freq_inst / self.sampling_frequency, axis=1)
        # Generate the CPFSK waveform by taking the cosine of the phase,
        # trimmed to a whole number of symbols.
        total_samples = num_bits // self.num_bits_per_symbol * self.samples_per_symbol
        waveform = np.cos(phase)[:, :total_samples]
        return waveform

View File

@ -0,0 +1,11 @@
from abc import ABC, abstractmethod
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
class Demodulator(Block, ABC):
    """Abstract base class for demodulator blocks.

    Subclasses implement ``__call__`` to map a received signal to recovered
    data (e.g. bits).
    """

    @abstractmethod
    def __call__(self, *args, **kwargs) -> np.ndarray:
        """Demodulate the input and return the recovered data as a numpy array."""
        raise NotImplementedError

View File

@ -0,0 +1,141 @@
import warnings
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.coherent_correlator import (
CoherentCorrelator,
)
from ria_toolkit_oss.signal.block_generator.continuous_modulation.demodulator import (
Demodulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class FSKDemodulator(Demodulator):
    """
    A coherent FSK demodulator that uses a bank of correlators for symbol detection.

    The received passband signal (assumed to be a 2D array of shape (batch_size, total_samples))
    is segmented into symbol intervals. Each correlator processes the signal over each symbol,
    returning a decision statistic. For each symbol period, the demodulator selects the candidate
    with the maximum absolute correlation output, converts that candidate index into a bit sequence,
    and outputs the recovered bit stream.

    Parameter constraints:

    - frequency_spacing * symbol_duration must be at least 0.5 (for coherent detection).

    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param frequency_spacing: The frequency spacing (Hz) between adjacent symbols.
        Note: Effective frequency offsets are (frequency_spacing/2) times the
        mapped odd integers.
    :type frequency_spacing: float
    :param symbol_duration: The duration (seconds) of one symbol period.
    :type symbol_duration: float
    :param sampling_frequency: The sampling frequency (Hz) of the received signal.
    :type sampling_frequency: float
    :raises AssertionError: If frequency_spacing * symbol_duration < 0.5.
    """

    def __init__(
        self, num_bits_per_symbol: int, frequency_spacing: float, symbol_duration: float, sampling_frequency: float
    ):
        # Assert that the frequency spacing and symbol duration are sufficient
        # to maintain orthogonality for coherent FSK.
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal coherent FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Calculate the largest possible carrier frequency from the candidate mapping.
        largest_carrier = (2**num_bits_per_symbol - 1) / 2 * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency
        # Number of candidate symbols.
        self.num_candidates = 2**self.num_bits_per_symbol
        # Map candidate indices to odd integers:
        # For example, if num_candidates=4, candidate_indices = [-3, -1, 1, 3].
        self.candidate_indices = 2 * np.arange(self.num_candidates) - (self.num_candidates - 1)
        # Compute the candidate carrier frequencies.
        self.candidate_frequencies = (self.frequency_spacing / 2) * self.candidate_indices
        # Create a bank of correlators for each candidate frequency.
        # per_symbol=False: each correlator uses a continuous time vector.
        self.correlators = [
            CoherentCorrelator(f_c, self.symbol_duration, self.sampling_frequency, False)
            for f_c in self.candidate_frequencies
        ]

    @property
    def input_type(self) -> DataType:
        """The demodulator expects a passband signal as input."""
        return DataType.PASSBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """The demodulator produces a bit stream as output."""
        return DataType.BITS

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Demodulate the received FSK signal using a bank of coherent correlators.

        The received signal is assumed to be a 2D numpy array of shape
        (batch_size, total_samples). Any trailing samples beyond a whole number
        of symbols (samples_per_symbol = symbol_duration * sampling_frequency)
        are truncated.

        For each candidate frequency, the corresponding correlator processes the signal and
        returns decision statistics (one per symbol). The demodulator then selects, for each symbol,
        the candidate with the maximum absolute correlation value, and converts that candidate index
        into its corresponding bit representation.

        :param signal: The received passband signal (shape: (batch_size, total_samples)).
        :type signal: np.ndarray
        :return: A 2D numpy array of shape (batch_size, num_bits), where
            num_bits = (total_samples / samples_per_symbol) * num_bits_per_symbol.
        :rtype: np.ndarray
        """
        batch_size, total_samples = signal.shape
        samples_per_symbol = int(self.symbol_duration * self.sampling_frequency)
        # Truncate to a whole number of symbols before correlating.
        excess_samples = total_samples % samples_per_symbol
        if excess_samples != 0:
            signal = signal[:, : total_samples - excess_samples]
        # Process the signal with each correlator in the bank.
        # Each correlator returns an array of shape (batch_size, num_symbols).
        stats = [corr(signal) for corr in self.correlators]
        # Stack along a new axis: shape (num_candidates, batch_size, num_symbols)
        stats = np.stack(stats, axis=0)
        # For each symbol (per batch), select the candidate with the maximum absolute correlation.
        # decision_indices: shape (batch_size, num_symbols) with values in {0, ..., num_candidates - 1}.
        decision_indices = np.argmax(np.abs(stats), axis=0)
        # Convert candidate indices to bit sequences.
        # Each candidate index is in the range [0, num_candidates - 1] and is represented with num_bits_per_symbol bits
        # We convert each decision index into its binary representation (MSB first).
        bits = ((decision_indices[..., None] >> np.arange(self.num_bits_per_symbol - 1, -1, -1)) & 1).astype(np.int32)
        # Reshape the bits to produce a bit stream of shape (batch_size, num_symbols * num_bits_per_symbol).
        bits = bits.reshape(batch_size, -1)
        return bits

    def __str__(self) -> str:
        """Return a string representation of the FSKDemodulator."""
        return (
            f"FSKDemodulator(num_bits_per_symbol={self.num_bits_per_symbol}, "
            f"frequency_spacing={self.frequency_spacing}, "
            f"symbol_duration={self.symbol_duration}, "
            f"sampling_frequency={self.sampling_frequency})"
        )

View File

@ -0,0 +1,155 @@
import warnings
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.coherent_correlator import (
CoherentCorrelator,
)
from ria_toolkit_oss.signal.block_generator.continuous_modulation.demodulator import (
Demodulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class FSKDemodulator(Demodulator):
    """
    A coherent FSK demodulator that uses a bank of correlators for symbol detection.

    The received passband signal (assumed to be a 2D array of shape (batch_size, total_samples))
    is segmented into symbol intervals. Each correlator processes the signal over each symbol,
    returning a decision statistic. For each symbol period, the demodulator selects the candidate
    with the maximum absolute correlation output, converts that candidate index into a bit sequence,
    and outputs the recovered bit stream.

    Parameter constraints:
      - frequency_spacing * symbol_duration must be at least 0.5 (for coherent detection).
      - The lowest candidate frequency (when mapping symmetrically about center_frequency)
        must be positive.

    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param center_frequency: The center frequency (Hz) about which the candidate carrier frequencies are distributed.
    :type center_frequency: float
    :param frequency_spacing: The frequency spacing (Hz) between adjacent symbols.
        Note: Effective frequency offsets are (frequency_spacing/2) times the mapped odd integers.
    :type frequency_spacing: float
    :param symbol_duration: The duration (seconds) of one symbol period.
    :type symbol_duration: float
    :param sampling_frequency: The sampling frequency (Hz) of the received signal.
    :type sampling_frequency: float
    :raises AssertionError: If frequency_spacing * symbol_duration < 0.5, or if the lowest
        candidate frequency is not positive.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        center_frequency: float,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
    ):
        # Assert that the frequency spacing and symbol duration are sufficient
        # to maintain orthogonality for coherent FSK.
        # (Message fixed: the check enforces 0.5, not 1, matching the class docstring.)
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal coherent FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Ensure that the lowest frequency (when mapping symbols symmetrically about the center) is positive.
        assert center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing > 0, (
            f"With center_frequency={center_frequency} Hz, frequency_spacing={frequency_spacing} Hz, "
            f"and num_bits_per_symbol={num_bits_per_symbol}, the lowest candidate frequency would be "
            f"{center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing} Hz, which must be positive."
        )
        # Calculate the largest possible carrier frequency from the candidate mapping.
        largest_carrier = center_frequency + ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.center_frequency = center_frequency
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency
        # Number of candidate symbols.
        self.num_candidates = 2**self.num_bits_per_symbol
        # Map candidate indices to odd integers:
        # For example, if num_candidates=4, candidate_indices = [-3, -1, 1, 3].
        self.candidate_indices = 2 * np.arange(self.num_candidates) - (self.num_candidates - 1)
        # Compute the candidate carrier frequencies.
        self.candidate_frequencies = self.center_frequency + (self.frequency_spacing / 2) * self.candidate_indices
        # Create a bank of correlators for each candidate frequency.
        # NOTE(review): the final positional argument appears to disable per-symbol carrier
        # sampling (a continuous time vector is used instead) -- confirm against
        # CoherentCorrelator's signature.
        self.correlators = [
            CoherentCorrelator(f_c, self.symbol_duration, self.sampling_frequency, False)
            for f_c in self.candidate_frequencies
        ]

    @property
    def input_type(self) -> DataType:
        """The demodulator expects a passband signal as input."""
        return DataType.PASSBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """The demodulator produces a bit stream as output."""
        return DataType.BITS

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Demodulate the received FSK signal using a bank of coherent correlators.

        The received signal is assumed to be a 2D numpy array of shape
        (batch_size, total_samples). If total_samples is not an integer multiple of
        samples_per_symbol (= symbol_duration * sampling_frequency), the trailing excess
        samples are discarded before demodulation (no exception is raised).

        For each candidate frequency, the corresponding correlator processes the signal and
        returns decision statistics (one per symbol). The demodulator then selects, for each symbol,
        the candidate with the maximum absolute correlation value, and converts that candidate index
        into its corresponding bit representation.

        :param signal: The received passband signal (shape: (batch_size, total_samples)).
        :type signal: np.ndarray
        :return: A 2D numpy array of shape (batch_size, num_bits), where
            num_bits = (total_samples // samples_per_symbol) * num_bits_per_symbol.
        :rtype: np.ndarray
        """
        batch_size, total_samples = signal.shape
        samples_per_symbol = int(self.symbol_duration * self.sampling_frequency)
        excess_samples = total_samples % samples_per_symbol
        if excess_samples != 0:
            # Trim trailing samples that do not fill a whole symbol.
            signal = signal[:, : total_samples - excess_samples]
        # Process the signal with each correlator in the bank.
        # Each correlator returns an array of shape (batch_size, num_symbols).
        stats = [corr(signal) for corr in self.correlators]
        # Stack along a new axis: shape (num_candidates, batch_size, num_symbols)
        stats = np.stack(stats, axis=0)
        # For each symbol (per batch), select the candidate with the maximum absolute correlation.
        # decision_indices: shape (batch_size, num_symbols) with values in {0, ..., num_candidates - 1}.
        decision_indices = np.argmax(np.abs(stats), axis=0)
        # Convert candidate indices to bit sequences (MSB first):
        # right-shift each index by num_bits_per_symbol-1 .. 0 and mask the low bit.
        bits = ((decision_indices[..., None] >> np.arange(self.num_bits_per_symbol - 1, -1, -1)) & 1).astype(np.int32)
        # Reshape the bits to produce a bit stream of shape (batch_size, num_symbols * num_bits_per_symbol).
        bits = bits.reshape(batch_size, -1)
        return bits

    def __str__(self) -> str:
        """Return a string representation of the FSKDemodulator."""
        return (
            f"FSKDemodulator(num_bits_per_symbol={self.num_bits_per_symbol}, "
            f"center_frequency={self.center_frequency}, frequency_spacing={self.frequency_spacing}, "
            f"symbol_duration={self.symbol_duration}, sampling_frequency={self.sampling_frequency})"
        )

View File

@ -0,0 +1,133 @@
import warnings
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.modulator import (
Modulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class FSKModulator(Modulator):
    """
    A modulator for Frequency Shift Keying (FSK) signals that converts binary sequences into
    complex baseband waveforms with frequencies mapped symmetrically about ``fc = 0``.

    Symbol indices are mapped to odd integers centered on zero, yielding carrier offsets
    symmetrically distributed around 0 Hz. A complex exponential at the corresponding
    frequency is generated over the symbol duration, and the complete modulated signal is
    obtained by concatenating the waveforms for all symbols.

    Parameter constraint enforced by the constructor:
      - The product of `frequency_spacing` and `symbol_duration` must be at least 0.5 to ensure
        sufficient frequency separation for coherent FSK.

    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param frequency_spacing: The frequency spacing (Hz) between adjacent symbols. Effective spacing
        is half of this value when using the odd integer mapping.
    :type frequency_spacing: float
    :param symbol_duration: The duration (seconds) of each symbol.
    :type symbol_duration: float
    :param sampling_frequency: The sampling frequency (Hz) used to generate the waveform.
    :type sampling_frequency: float
    :raises AssertionError: If frequency_spacing * symbol_duration is less than 0.5.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
    ):
        # Assert that the frequency spacing and symbol duration are sufficient
        # to maintain orthogonality for coherent FSK.
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal discontinuous phase FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Calculate the largest possible carrier frequency from the candidate mapping.
        largest_carrier = ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency

    @property
    def input_type(self) -> DataType:
        return DataType.BITS

    @property
    def output_type(self) -> DataType:
        # NOTE(review): this modulator emits a complex baseband waveform (np.exp below),
        # yet declares PASSBAND_SIGNAL -- confirm this is intentional for chain validation.
        return DataType.PASSBAND_SIGNAL

    def get_samples(self, num_samples):
        # Sample-count-driven generation is not supported; use __call__ with a bit array.
        raise NotImplementedError

    def __call__(self, bits: np.ndarray) -> np.ndarray:
        """
        Modulate a batch of binary sequences into FSK waveforms in a vectorized manner.

        Each row of the input 2D numpy array is treated as an independent bit stream.
        The bits are grouped into symbols of length `num_bits_per_symbol`, converted to integer
        symbol indices using MSB-first ordering, and then mapped to odd integer values centered
        around zero. These symbol indices are used to compute the carrier frequencies for each
        symbol as:

            frequency = (frequency_spacing / 2) * symbol_indices

        A complex exponential is generated for each symbol over the symbol duration,
        and the waveforms for all symbols are concatenated to form the final modulated signal.

        :param bits: A 2D numpy array of shape (batch_size, num_bits), where each row is a separate bit stream.
        :type bits: np.ndarray
        :return: A 2D complex numpy array of shape (batch_size, total_samples) representing the modulated
            baseband signal, where total_samples = (num_bits // num_bits_per_symbol) *
            (symbol_duration * sampling_frequency).
        :rtype: np.ndarray
        :raises ValueError: If the number of bits per row is not a multiple of num_bits_per_symbol.
        """
        batch_size, num_bits = bits.shape
        if num_bits % self.num_bits_per_symbol != 0:
            raise ValueError(
                f"The number of bits per row ({num_bits}) must be a multiple of "
                f"num_bits_per_symbol ({self.num_bits_per_symbol})."
            )
        # Calculate the number of symbols per bit stream.
        num_symbols = num_bits // self.num_bits_per_symbol
        # Reshape to (batch_size, num_symbols, num_bits_per_symbol) and convert bits to integers.
        bits_reshaped = bits.reshape(batch_size, num_symbols, self.num_bits_per_symbol).astype(np.int32)
        # Create a vector of powers for MSB-first conversion: [2^(n-1), ..., 2^0].
        powers_of_two = 1 << np.arange(self.num_bits_per_symbol)[::-1]
        raw_indices = np.sum(bits_reshaped * powers_of_two, axis=2)
        # Map raw indices {0..M-1} to odd integers centered about zero: {-(M-1), ..., M-1}.
        symbol_indices = 2 * (raw_indices + 1) - 2**self.num_bits_per_symbol - 1
        # Map symbols to carrier frequencies.
        frequencies = symbol_indices * self.frequency_spacing / 2
        # Compute the number of samples per symbol.
        samples_per_symbol = int(self.symbol_duration * self.sampling_frequency)
        total_samples = num_symbols * samples_per_symbol
        # Create a time vector for one symbol period and reshape for broadcasting.
        t = np.linspace(0, self.symbol_duration, samples_per_symbol, endpoint=False)[None, None, :]
        # Generate the complex exponential for each symbol in a vectorized manner.
        symbol_waveforms = np.exp(2j * np.pi * frequencies[:, :, None] * t)
        # Concatenate the symbol waveforms to form the final modulated waveform.
        waveform = symbol_waveforms.reshape(batch_size, total_samples)
        return waveform

View File

@ -0,0 +1,143 @@
import warnings
import numpy as np
from ria_toolkit_oss.signal.block_generator.continuous_modulation.modulator import (
Modulator,
)
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class FSKModulator(Modulator):
    """
    A modulator for Frequency Shift Keying (FSK) signals that converts binary sequences into
    passband waveforms with frequencies mapped symmetrically about a given center frequency.

    This design yields carrier frequencies that are symmetrically distributed around the
    `center_frequency`. A sinusoidal waveform at the corresponding frequency is generated over
    the symbol duration, and the complete modulated signal is obtained by concatenating the
    waveforms for all symbols.

    The modulator also enforces parameter constraints:
      - The product of `frequency_spacing` and `symbol_duration` must be at least 0.5 to ensure
        sufficient frequency separation for coherent FSK.
      - The lowest frequency, when mapping symbols symmetrically about the center, must be positive.

    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param center_frequency: The center frequency (Hz) around which the carrier frequencies are distributed.
    :type center_frequency: float
    :param frequency_spacing: The frequency spacing (Hz) between adjacent symbols. Effective spacing
        is half of this value when using the odd integer mapping.
    :type frequency_spacing: float
    :param symbol_duration: The duration (seconds) of each symbol.
    :type symbol_duration: float
    :param sampling_frequency: The sampling frequency (Hz) used to generate the waveform.
    :type sampling_frequency: float
    :raises AssertionError: If frequency_spacing * symbol_duration is less than 0.5, or if the
        computed lowest frequency is not positive.
    """

    def __init__(
        self,
        num_bits_per_symbol: int,
        center_frequency: float,
        frequency_spacing: float,
        symbol_duration: float,
        sampling_frequency: float,
    ):
        # Assert that the frequency spacing and symbol duration are sufficient
        # to maintain orthogonality for coherent FSK.
        assert frequency_spacing * symbol_duration >= 0.5, (
            "For orthogonal discontinuous phase FSK, frequency_spacing * symbol_duration must be at least 0.5. "
            f"Received frequency_spacing={frequency_spacing} and symbol_duration={symbol_duration}"
        )
        # Ensure that the lowest frequency (when mapping symbols symmetrically about the center) is positive.
        assert center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing > 0, (
            f"With center_frequency={center_frequency} Hz, frequency_spacing={frequency_spacing} Hz, "
            f"and num_bits_per_symbol={num_bits_per_symbol}, the lowest frequency would be "
            f"{center_frequency - ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing} Hz, which must be positive."
        )
        # Calculate the largest possible carrier frequency from the candidate mapping.
        largest_carrier = center_frequency + ((2**num_bits_per_symbol - 1) / 2) * frequency_spacing
        if sampling_frequency < 2 * largest_carrier:
            warnings.warn(
                f"Sampling frequency ({sampling_frequency} Hz) is less than twice the largest carrier frequency "
                f"({largest_carrier} Hz). This may violate the Nyquist criterion and cause aliasing.",
                UserWarning,
            )
        self.num_bits_per_symbol = num_bits_per_symbol
        self.center_frequency = center_frequency
        self.frequency_spacing = frequency_spacing
        self.symbol_duration = symbol_duration
        self.sampling_frequency = sampling_frequency

    @property
    def input_type(self) -> DataType:
        return DataType.BITS

    @property
    def output_type(self) -> DataType:
        return DataType.PASSBAND_SIGNAL

    def get_samples(self, num_samples):
        # Sample-count-driven generation is not supported; use __call__ with a bit array.
        raise NotImplementedError

    def __call__(self, bits: np.ndarray) -> np.ndarray:
        """
        Modulate a batch of binary sequences into FSK waveforms in a vectorized manner.

        Each row of the input 2D numpy array is treated as an independent bit stream.
        The bits are grouped into symbols of length `num_bits_per_symbol`, converted to integer
        symbol indices using MSB-first ordering, and then mapped to odd integer values centered
        around zero. These symbol indices are used to compute the carrier frequencies for each
        symbol as:

            frequency = center_frequency + (frequency_spacing / 2) * symbol_indices

        A cosine waveform is generated for each symbol over the symbol duration,
        and the waveforms for all symbols are concatenated to form the final modulated signal.

        :param bits: A 2D numpy array of shape (batch_size, num_bits), where each row is a separate bit stream.
        :type bits: np.ndarray
        :return: A 2D numpy array of shape (batch_size, total_samples) representing the modulated passband signal,
            where total_samples = (num_bits // num_bits_per_symbol) * (symbol_duration * sampling_frequency).
        :rtype: np.ndarray
        :raises ValueError: If the number of bits per row is not a multiple of num_bits_per_symbol.
        """
        batch_size, num_bits = bits.shape
        if num_bits % self.num_bits_per_symbol != 0:
            raise ValueError(
                f"The number of bits per row ({num_bits}) must be a multiple of "
                f"num_bits_per_symbol ({self.num_bits_per_symbol})."
            )
        # Calculate the number of symbols per bit stream.
        num_symbols = num_bits // self.num_bits_per_symbol
        # Reshape to (batch_size, num_symbols, num_bits_per_symbol) and convert bits to integers.
        bits_reshaped = bits.reshape(batch_size, num_symbols, self.num_bits_per_symbol).astype(np.int32)
        # Create a vector of powers for MSB-first conversion: [2^(n-1), ..., 2^0].
        powers_of_two = 1 << np.arange(self.num_bits_per_symbol)[::-1]
        raw_indices = np.sum(bits_reshaped * powers_of_two, axis=2)
        # Map raw indices {0..M-1} to odd integers centered about zero: {-(M-1), ..., M-1}.
        symbol_indices = 2 * (raw_indices + 1) - 2**self.num_bits_per_symbol - 1
        # Map symbols to carrier frequencies.
        frequencies = self.center_frequency + (self.frequency_spacing / 2) * symbol_indices
        # Compute the number of samples per symbol.
        samples_per_symbol = int(self.symbol_duration * self.sampling_frequency)
        total_samples = num_symbols * samples_per_symbol
        # Create a time vector for one symbol period and reshape for broadcasting.
        t = np.linspace(0, self.symbol_duration, samples_per_symbol, endpoint=False)[None, None, :]
        # Generate the sinusoidal waveform for each symbol in a vectorized manner.
        symbol_waveforms = np.cos(2 * np.pi * frequencies[:, :, None] * t)
        # Concatenate the symbol waveforms to form the final modulated waveform.
        waveform = symbol_waveforms.reshape(batch_size, total_samples)
        return waveform

View File

@ -0,0 +1,11 @@
from abc import ABC, abstractmethod
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
class Modulator(Block, ABC):
    """Abstract base class for modulator blocks that transform input data into a numpy waveform."""

    @abstractmethod
    def __call__(self, *args, **kwargs) -> np.ndarray:
        """Modulate the given input and return the resulting signal array.

        Subclasses must override this; the base implementation always raises.

        :return: The modulated signal.
        :rtype: np.ndarray
        :raises NotImplementedError: Always, unless overridden by a subclass.
        """
        raise NotImplementedError

View File

@ -0,0 +1,34 @@
from enum import IntEnum
class DataType(IntEnum):
    """
    Enumeration of different data types used in signal processing.

    Blocks declare their ``input_type`` and ``output_type`` with these values so that a
    generator can verify that consecutive blocks in a processing chain are compatible.
    """

    NONE = 0
    """Represents no input."""
    SYMBOLS = 1
    """Represents symbol data."""
    SOFT_SYMBOLS = 2
    """Represents soft symbol data."""
    UPSAMPLED_SYMBOLS = 3
    """Represents upsampled symbol data."""
    BITS = 4
    """Represents bit data."""
    SOFT_BITS = 5
    """Represents soft bit data."""
    BASEBAND_SIGNAL = 6
    """Represents baseband signal data."""
    PASSBAND_SIGNAL = 7
    """Represents passband signal data."""
    IQ_COMPONENTS = 8
    """Represents in-phase and quadrature components."""

View File

@ -0,0 +1,4 @@
from .downconversion import FrequencyDownConversion
from .upconversion import FrequencyUpConversion
__all__ = ["FrequencyUpConversion", "FrequencyDownConversion"]

View File

@ -0,0 +1,57 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class FrequencyDownConversion(Block):
    """
    A class to perform frequency down-conversion on passband signals.

    Multiplies the input by a complex exponential at ``-carrier_frequency`` (complex input)
    or by a cosine at ``carrier_frequency`` (real input). No lowpass filtering is applied
    here; removing image/double-frequency components is left to downstream blocks.

    :param carrier_frequency: The carrier frequency in Hz.
    :type carrier_frequency: float
    :param sampling_rate: The sampling rate of the input signal in Hz.
    :type sampling_rate: float

    Methods:
    --------
    __call__(signal: np.ndarray) -> np.ndarray:
        Applies frequency down-conversion to the input passband signal.
    """

    def __init__(self, carrier_frequency: float, sampling_rate: float):
        self.carrier_frequency = carrier_frequency
        self.sampling_rate = sampling_rate

    @property
    def input_type(self) -> DataType:
        """Get the input data type for the frequency down-conversion operation."""
        return DataType.PASSBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """Get the output data type for the frequency down-conversion operation."""
        return DataType.BASEBAND_SIGNAL

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Apply frequency down-conversion to the input passband signal.

        Time is referenced to sample 0 along the last axis, so the same carrier is applied
        to every row of a batched input.

        :param signal: The input passband signal to be demodulated; the last axis is time.
            (Generalized: previously required shape (batch, num_samples); now any array
            whose last axis is time, including 1D.)
        :type signal: np.ndarray
        :return: The demodulated baseband signal, same shape as the input.
        :rtype: np.ndarray
        """
        # shape[-1] (rather than shape[1]) generalizes to 1D and higher-rank batches;
        # numpy broadcasting aligns the 1D carrier with the trailing time axis.
        num_samples = signal.shape[-1]
        t = np.arange(num_samples) / self.sampling_rate
        if np.iscomplexobj(signal):
            carrier = np.exp(-1j * 2 * np.pi * self.carrier_frequency * t)
        else:
            carrier = np.cos(2 * np.pi * self.carrier_frequency * t)
        return signal * carrier

    def __str__(self) -> str:
        """Return a string representation of the FrequencyDownConversion object."""
        return (
            f"FrequencyDownConversion(carrier_frequency={self.carrier_frequency}, sampling_rate={self.sampling_rate})"
        )

View File

@ -0,0 +1,55 @@
import numpy as np
from utils.signal.block_generator.block import Block
from utils.signal.block_generator.data_types import DataType
class FrequencyUpConversion(Block):
    """
    A class to perform frequency up-conversion on baseband signals.

    Multiplies the input by a complex exponential at ``carrier_frequency`` (complex input)
    or by a cosine at ``carrier_frequency`` (real input).

    :param carrier_frequency: The carrier frequency in Hz.
    :type carrier_frequency: float
    :param sampling_rate: The sampling rate of the input signal in Hz.
    :type sampling_rate: float

    Methods:
    --------
    __call__(signal: np.ndarray) -> np.ndarray:
        Applies frequency up-conversion to the input baseband signal.
    """

    def __init__(self, carrier_frequency: float, sampling_rate: float):
        self.carrier_frequency = carrier_frequency
        self.sampling_rate = sampling_rate

    @property
    def input_type(self) -> DataType:
        """Get the input data type for the frequency up-conversion operation."""
        return DataType.BASEBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """Get the output data type for the frequency up-conversion operation."""
        return DataType.PASSBAND_SIGNAL

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Apply frequency up-conversion to the input baseband signal.

        Time is referenced to sample 0 along the last axis, so the same carrier is applied
        to every row of a batched input.

        :param signal: The input baseband signal to be modulated; the last axis is time.
            (Generalized: previously required shape (batch, num_samples); now any array
            whose last axis is time, including 1D.)
        :type signal: np.ndarray
        :return: The modulated passband signal, same shape as the input.
        :rtype: np.ndarray
        """
        # shape[-1] (rather than shape[1]) generalizes to 1D and higher-rank batches;
        # numpy broadcasting aligns the 1D carrier with the trailing time axis.
        num_samples = signal.shape[-1]
        t = np.arange(num_samples) / self.sampling_rate
        if np.iscomplexobj(signal):
            carrier = np.exp(1j * 2 * np.pi * self.carrier_frequency * t)
        else:
            carrier = np.cos(2 * np.pi * self.carrier_frequency * t)
        return signal * carrier

    def __str__(self) -> str:
        """Return a string representation of the FrequencyUpConversion object."""
        return f"FrequencyUpConversion(carrier_frequency={self.carrier_frequency}, sampling_rate={self.sampling_rate})"

View File

@ -0,0 +1,34 @@
"""
RIA Block-Based Signal Generator Module: Generators
This module provides high-level generator wrappers that utilize the RIA block-based signal generator.
These generators simplify the creation of common communication system signals by automatically
configuring and connecting the appropriate blocks.
Key components:
- SignalGenerator: Base class for all generators
- Specialized generators: PAMGenerator, PSKGenerator, QAMGenerator
Features:
- Easy-to-use interfaces for generating complex signals
- Built on top of RIA's modular block system
- Customizable parameters for each generator type
Usage:
- Import specific generators to quickly create signals without manually connecting individual blocks.
- For more control, use the underlying blocks directly.
See individual generator classes for detailed parameters and methods.
"""
from ria_toolkit_oss.signal.block_generator.generators.pam_generator import PAMGenerator
from ria_toolkit_oss.signal.block_generator.generators.psk_generator import PSKGenerator
from ria_toolkit_oss.signal.block_generator.generators.qam_generator import QAMGenerator
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
SignalGenerator,
)
__all__ = ["SignalGenerator", "PAMGenerator", "PSKGenerator", "QAMGenerator"]

View File

@ -0,0 +1,55 @@
from ria_toolkit_oss.datatypes.recording import Recording
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
SignalGenerator,
)
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
from ria_toolkit_oss.signal.block_generator.source.binary_source import BinarySource
class PAMGenerator(SignalGenerator):
    """
    Pulse Amplitude Modulation (PAM) signal generator.

    Assembles a binary source, a PAM constellation mapper, an upsampler, and the supplied
    pulse shaping filter into a processing chain.

    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param upsampling_factor: Upsampling factor.
    :type upsampling_factor: int
    :param pulse_shaping_filter: Pulse shaping filter to be applied.
    :type pulse_shaping_filter: PulseShapingFilter
    """

    def __init__(self, num_bits_per_symbol: int, upsampling_factor: int, pulse_shaping_filter: PulseShapingFilter):
        self.num_bits_per_symbol = num_bits_per_symbol
        chain = [
            BinarySource(),
            Mapper("PAM", num_bits_per_symbol),
            Upsampling(upsampling_factor),
            pulse_shaping_filter,
        ]
        super().__init__(chain)

    def record(self, batch_size: int = 1, num_bits: int = 1024) -> Recording:
        """
        Generate and record PAM signals.

        :param batch_size: Number of recordings to generate, defaults to 1.
        :type batch_size: int, optional
        :param num_bits: Number of bits per recording, defaults to 1024.
        :type num_bits: int, optional
        :return: A Recording object containing the generated signals and metadata.
        :rtype: Recording
        """
        # The first block is the bit source; the rest transform its output in order.
        source, *stages = self.blocks
        x = source(batch_size, num_bits)
        for stage in stages:
            x = stage(x)
        return Recording(
            x,
            {
                "num_recordings": batch_size,
                "bits_per_recording": num_bits,
                "modulation": f"{2**self.num_bits_per_symbol}PAM",
                "pulse_shaping_filter": str(self.blocks[-1]),
            },
        )

View File

@ -0,0 +1,55 @@
from ria_toolkit_oss.datatypes.recording import Recording
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
SignalGenerator,
)
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
from ria_toolkit_oss.signal.block_generator.source.binary_source import BinarySource
class PSKGenerator(SignalGenerator):
    """
    A generator for Phase Shift Keying (PSK) modulated signals.

    This class generates PSK signals with configurable parameters such as
    bits per symbol, upsampling factor, and pulse shaping filter.

    :param num_bits_per_symbol: Number of bits per symbol in the PSK modulation.
    :type num_bits_per_symbol: int
    :param upsampling_factor: Factor by which to upsample the signal.
    :type upsampling_factor: int
    :param pulse_shaping_filter: The pulse shaping filter to apply to the signal.
    :type pulse_shaping_filter: PulseShapingFilter
    """

    def __init__(self, num_bits_per_symbol: int, upsampling_factor: int, pulse_shaping_filter: PulseShapingFilter):
        src = BinarySource()
        mapper = Mapper("PSK", num_bits_per_symbol)
        us = Upsampling(upsampling_factor)
        self.num_bits_per_symbol = num_bits_per_symbol
        super().__init__([src, mapper, us, pulse_shaping_filter])

    def record(self, batch_size: int = 1, num_bits: int = 1024) -> Recording:
        """
        Generate and record PSK signals.

        :param batch_size: Number of recordings to generate, defaults to 1.
        :type batch_size: int, optional
        :param num_bits: Number of bits per recording, defaults to 1024.
        :type num_bits: int, optional
        :return: A Recording object containing the generated signals and metadata.
        :rtype: Recording
        """
        x = self.blocks[0](batch_size, num_bits)
        for block in self.blocks[1:]:
            x = block(x)
        metadata = {
            "num_recordings": batch_size,
            # Fixed: key previously had a stray trailing colon ("bits_per_recording:"),
            # inconsistent with PAMGenerator and QAMGenerator metadata.
            "bits_per_recording": num_bits,
            "modulation": f"{2**self.num_bits_per_symbol}PSK",
            "pulse_shaping_filter": str(self.blocks[-1]),
        }
        return Recording(x, metadata)

View File

@ -0,0 +1,55 @@
from ria_toolkit_oss.datatypes.recording import Recording
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
SignalGenerator,
)
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
from ria_toolkit_oss.signal.block_generator.source.binary_source import BinarySource
class QAMGenerator(SignalGenerator):
    """
    A generator for Quadrature Amplitude Modulation (QAM) signals.

    Assembles a binary source, a QAM constellation mapper, an upsampler, and the supplied
    pulse shaping filter into a processing chain.

    :param num_bits_per_symbol: Number of bits per QAM symbol.
    :type num_bits_per_symbol: int
    :param upsampling_factor: Factor by which to upsample the signal.
    :type upsampling_factor: int
    :param pulse_shaping_filter: Filter used for pulse shaping.
    :type pulse_shaping_filter: PulseShapingFilter
    """

    def __init__(self, num_bits_per_symbol: int, upsampling_factor: int, pulse_shaping_filter: PulseShapingFilter):
        self.num_bits_per_symbol = num_bits_per_symbol
        chain = [
            BinarySource(),
            Mapper("QAM", num_bits_per_symbol),
            Upsampling(upsampling_factor),
            pulse_shaping_filter,
        ]
        super().__init__(chain)

    def record(self, batch_size: int = 1, num_bits: int = 1024) -> Recording:
        """
        Generate and record QAM signals.

        :param batch_size: Number of recordings to generate, defaults to 1.
        :type batch_size: int, optional
        :param num_bits: Number of bits per recording, defaults to 1024.
        :type num_bits: int, optional
        :return: A Recording object containing the generated signals and metadata.
        :rtype: Recording
        """
        # The first block is the bit source; the rest transform its output in order.
        source, *stages = self.blocks
        x = source(batch_size, num_bits)
        for stage in stages:
            x = stage(x)
        return Recording(
            x,
            {
                "num_recordings": batch_size,
                "bits_per_recording": num_bits,
                "modulation": f"{2**self.num_bits_per_symbol}QAM",
                "pulse_shaping_filter": str(self.blocks[-1]),
            },
        )

View File

@ -0,0 +1,36 @@
from abc import ABC
from typing import List
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.recordable import Recordable
class SignalGenerator(Recordable, ABC):
    """
    An abstract base class for signal generators that work with a sequence of blocks.

    On construction, the class checks that the output type of every block matches the
    input type of the block that follows it, so misconfigured chains fail fast.

    :param blocks: A list of processing blocks to be used in the signal generation.
    :type blocks: List of Blocks
    :raises ValueError: If there's a mismatch between block output and input types.
    """

    # TODO: Consider exposing 'blocks' through a property, and adding methods for adding to / manipulating the
    # block sequence.

    def __init__(self, blocks: List[Block]):
        self.blocks = blocks
        self._validate_block_sequence()

    def _validate_block_sequence(self) -> None:
        """Raise ValueError at the first output/input type mismatch between neighbours."""
        for idx, (current, following) in enumerate(zip(self.blocks, self.blocks[1:])):
            if current.output_type != following.input_type:
                raise ValueError(
                    f"Block {idx} output type {current.output_type} does not match "
                    f"block {idx + 1} input type {following.input_type}."
                )

View File

@ -0,0 +1,20 @@
import pathlib
from typing import Union
import numpy as np
def file_to_bits(path: str | pathlib.Path) -> np.ndarray:
data = pathlib.Path(path).read_bytes()
bits = np.unpackbits(np.frombuffer(data, dtype=np.uint8))
return bits.astype(np.uint8) # shape (N,)
def bits_to_file(bits: np.ndarray, path: str | pathlib.Path):
bits = bits.astype(np.uint8)[: (len(bits) // 8) * 8] # trim to bytes
data = np.packbits(bits).tobytes()
pathlib.Path(path).write_bytes(data)
def txt_to_str(path: Union[str, pathlib.Path], encoding: str = "utf-8") -> str:
    """
    Return the full text content of a file.

    :param path: Location of the text file.
    :param encoding: Text encoding used to decode the file, defaults to "utf-8".
    :return: The decoded file contents.
    """
    with open(path, "r", encoding=encoding) as handle:
        return handle.read()

View File

@ -0,0 +1,27 @@
"""
RIA Symbol Mapping and Demapping Module
This module provides blocks for symbol mapping and demapping within the RIA block-based signal generator framework.
Key components:
- Mapper: Maps bits to constellation points for various modulation schemes (e.g., M-QAM, M-PSK, M-PAM)
- SymbolDemapper: Converts soft symbols back to original symbols using maximum likelihood estimation
Features:
- Support for multiple modulation schemes
- Configurable parameters for different constellation sizes
Usage:
- Import Mapper or SymbolDemapper to incorporate into your signal processing chain.
For detailed parameters and methods, see individual class documentation.
"""
from .constellation_mapper import ConstellationMapper
from .mapper import Mapper
from .symbol_demapper import SymbolDemapper
__all__ = ["ConstellationMapper", "Mapper", "SymbolDemapper"]

View File

@ -0,0 +1,74 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.mapping.constellation_mapper import (
ConstellationMapper,
)
class _APSKMapper(ConstellationMapper):
    """
    A class to map input bits to Amplitude Phase Shift Keying (APSK) constellation points.

    Follows DVB-S2 / DVB-S2X standard structures for rings and radii ratios where applicable,
    or generic concentric ring structures.
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        super().__init__(num_bits_per_symbol, normalize, use_gray_code)
        self.constellation = self._generate_constellation()
        # Re-generate bit mapping if needed, or assume default.
        # Note: the base class calls _generate_bit_mapping(), which produces a generic
        # gray/binary index map. For APSK, generic gray coding might not match the DVB
        # standard mappings, but it is sufficient for synthetic generation.

    def _generate_constellation(self) -> np.ndarray:
        """
        Build the APSK constellation as concentric rings of equally spaced points.

        :return: Complex constellation points (normalized if ``self.normalize``).
        :rtype: np.ndarray
        """
        M = 2**self.num_bits_per_symbol
        # Define structures (rings and points per ring), based on common DVB standards.
        if M == 16:  # 16APSK: 4+12
            radii = [1.0, 2.57]  # R2/R1 ratio approx 2.57 for DVB-S2 16APSK
            points = [4, 12]
            phase_offsets = [0, 0]
        elif M == 32:  # 32APSK: 4+12+16
            radii = [1.0, 2.53, 4.30]
            points = [4, 12, 16]
            phase_offsets = [0, 0, 0]
        elif M == 64:  # 64APSK: 4+12+20+28
            radii = [1.0, 2.5, 4.3, 6.0]  # Approximate
            points = [4, 12, 20, 28]
            phase_offsets = [0, 0, 0, 0]
        elif M == 128:  # 128APSK: generic 5-ring layout (not the DVB-S2X split)
            # Ring sizes used here are 4+12+20+36+56 = 128.
            radii = [1.0, 2.5, 4.0, 5.5, 7.0]
            points = [4, 12, 20, 36, 56]  # Sum must be 128
            # 4+12+20+36+56 = 128
            phase_offsets = [0] * 5
        elif M == 256:  # 256APSK
            # Ring sizes: 4+12+28+52+68+92 = 256; radii evenly spaced from 1 to 6.
            radii = np.linspace(1, 6, 6)
            points = [4, 12, 28, 52, 68, 92]
            phase_offsets = [0] * 6
        else:
            # Fallback for other orders: single ring (PSK).
            # Used whenever no specific APSK ring structure is defined above.
            return self._generate_psk_fallback(M)
        constellation = []
        for r, p, phi in zip(radii, points, phase_offsets):
            # p equally spaced points on a ring of radius r, rotated by phi.
            angles = np.linspace(0, 2 * np.pi, p, endpoint=False) + phi
            ring = r * np.exp(1j * angles)
            constellation.extend(ring)
        constellation = np.array(constellation)
        if self.normalize:
            return self._normalize(constellation)
        return constellation

    def _generate_psk_fallback(self, M):
        # Fallback to PSK: M equally spaced unit-circle points. Note the normalize
        # flag is not applied here; unit-circle points already have average energy 1,
        # so normalization would be a no-op.
        angles = np.linspace(0, 2 * np.pi, M, endpoint=False)
        return np.exp(1j * angles)

View File

@ -0,0 +1,186 @@
import os
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
class ConstellationMapper(ABC):
    """
    Abstract base class for mapping input bits to constellation points.

    This class provides methods to generate constellation points, map input bits
    to constellation points, normalize constellation points, and display a
    constellation diagram.

    :param num_bits_per_symbol: Number of bits per symbol. To be used by subclasses.
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points. To be used by subclasses.
    :type normalize: bool, optional
    :param use_gray_code: Whether to use gray code as constellation points. To be used by subclasses.
    :type use_gray_code: bool, optional

    Note:
        This is an abstract class and should not be instantiated directly.
        Subclasses should implement the `_generate_constellation` method.
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        self.num_bits_per_symbol = num_bits_per_symbol
        self.normalize = normalize
        self.use_gray_code = use_gray_code
        # Subclasses assign the complex constellation array after calling
        # super().__init__() (via their _generate_constellation implementation).
        self.constellation = None
        self._generate_bit_mapping()

    def _generate_bit_mapping(self):
        """Generate bit mapping (forward map: decimal bit value -> constellation index)."""
        if self.use_gray_code:
            # Gray forward map: indices[d] = d ^ (d >> 1).
            indices = self.gray_code(self.num_bits_per_symbol)
        else:
            # Natural binary ordering.
            indices = range(2**self.num_bits_per_symbol)
        self.bit_mapping = np.array(indices)

    @abstractmethod
    def _generate_constellation(self) -> np.ndarray:
        """
        Generate the constellation points.

        This method should be implemented by subclasses.

        :raises NotImplementedError: This method must be implemented by subclasses.
        """
        raise NotImplementedError

    @staticmethod
    def gray_code(n: int) -> List[int]:
        """
        Generate Gray code for a given number of bits.

        :param n: Number of bits
        :type n: int
        :return: List of Gray-encoded values
        :rtype: List of ints
        """
        return [i ^ (i >> 1) for i in range(2**n)]

    def _reorder_for_gray(self) -> None:
        """
        Physically reorder self.constellation so index = Gray-coded decimal index.

        If the base class set self.bit_mapping to a Gray code forward map fwd_map
        such that fwd_map[d] = g, then we do new_const[g] = old_const[d].
        """
        M = len(self.constellation)
        old_const = self.constellation.copy()
        new_const = np.zeros_like(old_const)
        # self.bit_mapping is your forward Gray map array (length M)
        # fwd_map[d] = g
        fwd_map = self.bit_mapping
        for d in range(M):
            g = fwd_map[d]
            new_const[g] = old_const[d]
        self.constellation = new_const
        # Once physically reordered, array index i is the Gray-coded decimal i
        # So we can simplify to an identity map
        self.bit_mapping = np.arange(M)

    def __call__(self, bits: np.ndarray) -> np.ndarray:
        """
        Map bits to constellation points.

        :param bits: Input bits to be mapped. Shape should be (num_batches, num_bits).
        :type bits: np.ndarray
        :return: Mapped constellation points. Shape will be (num_batches, num_symbols).
        :rtype: np.ndarray
        :raises ValueError: If the number of input bits is not divisible by the number of bits per symbol.
        """
        # Check if the number of input bits is divisible by the number of bits per symbol
        if bits.shape[1] % self.num_bits_per_symbol != 0:
            raise ValueError(
                f"Number of input bits ({bits.shape[1]}) "
                f"must be divisible by the number of bits per symbol ({self.num_bits_per_symbol})."
            )
        # Group bits per symbol: (num_batches, num_symbols, num_bits_per_symbol).
        bits = bits.astype(np.int32).reshape((bits.shape[0], -1, self.num_bits_per_symbol))
        # MSB-first binary-to-decimal conversion via powers of two.
        decimal_values = np.sum(bits * (1 << np.arange(self.num_bits_per_symbol)[::-1]), axis=2)
        # Map symbol indices to constellation points
        symbol_indices = self.bit_mapping[decimal_values]
        return self.constellation[symbol_indices]

    @staticmethod
    def _normalize(constellation: np.ndarray) -> np.ndarray:
        """
        Normalize the constellation points so that their average energy is 1.

        :param constellation: The constellation points to normalize.
        :type constellation: np.ndarray
        :return: Normalized constellation points.
        :rtype: np.ndarray
        """
        average_energy = np.mean(np.abs(constellation) ** 2)
        return constellation / np.sqrt(average_energy)

    def show_constellation(self) -> None:
        """
        Display the constellation diagram with bit labels.

        Side effects: saves a timestamped PNG under ``images/`` (creating the
        directory if needed) and opens an interactive matplotlib window.
        """
        real_part, imag_part = np.real(self.constellation), np.imag(self.constellation)

        # Determine if it's a PAM constellation (purely real points).
        is_pam = np.allclose(imag_part, 0)

        fig, ax = plt.subplots(figsize=(10, 10))
        ax.scatter(real_part, imag_part, color="b", s=100)

        # Add bit labels to each point (skipped for dense constellations, > 6 bits).
        if self.num_bits_per_symbol <= 6:
            for i, (x, y) in enumerate(zip(real_part, imag_part)):
                ax.annotate(
                    bin(self.bit_mapping[i])[2:].zfill(self.num_bits_per_symbol),
                    (x, y),
                    xytext=(5, 5),
                    textcoords="offset points",
                )

        # Set axis labels and title. The slice strips the leading underscore and the
        # trailing "Mapper" from subclass names like "_QAMMapper" -> "QAM".
        ax.set_xlabel("I (In-Phase)")
        ax.set_ylabel("Q (Quadrature)")
        ax.set_title(f"{self.__class__.__name__[1:-6]} Constellation Diagram")

        # Show grid
        ax.grid(True)

        # Make the plot square
        ax.set_aspect("equal", adjustable="box")

        if is_pam:
            # For PAM, set y-axis limits to make the constellation visible
            y_range = max(abs(np.max(real_part)), abs(np.min(real_part))) * 0.2
            ax.set_ylim([-y_range, y_range])
        else:
            # For non-PAM, set limits based on the constellation points
            max_val = max(np.max(np.abs(real_part)), np.max(np.abs(imag_part)))
            ax.set_xlim([-max_val * 1.2, max_val * 1.2])
            ax.set_ylim([-max_val * 1.2, max_val * 1.2])

        # Save the figure
        os.makedirs("images", exist_ok=True)
        now = datetime.now()
        formatted_time = now.strftime("%Y%m%d_%H%M%S")
        file_name = f"images/constellation_{self.__class__.__name__}_{formatted_time}.png"
        fig.savefig(file_name, dpi=300, bbox_inches="tight")

        plt.show()

View File

@ -0,0 +1,64 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.mapping.constellation_mapper import (
ConstellationMapper,
)
class _CrossQAMMapper(ConstellationMapper):
    """
    A class to map input bits to Cross-QAM constellation points (Odd-order QAM).

    Supports 32QAM (5 bits) and 128QAM (7 bits) by removing corners from larger square constellations.

    :param num_bits_per_symbol: Number of bits per symbol (5 or 7 supported).
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points, defaults to True.
    :type normalize: bool, optional
    :param use_gray_code: Whether to use gray code indexing, defaults to True.
    :type use_gray_code: bool, optional
    :raises ValueError: If the modulation order is not 32 or 128 (raised lazily in __init__
        via _generate_constellation).
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        # Allow odd bits — cross constellations exist precisely for odd bit counts.
        super().__init__(num_bits_per_symbol, normalize, use_gray_code)
        self.constellation = self._generate_constellation()
        # Use default bit mapping from base class (integer index -> symbol index).
        # For true gray coding on Cross QAM, we'd need a specific lookup table.
        # Using generic index mapping for now.

    def _generate_constellation(self) -> np.ndarray:
        """
        Generate the cross-QAM constellation by removing corner points from a square grid.

        :return: Complex constellation points (normalized if ``self.normalize``).
        :rtype: np.ndarray
        :raises ValueError: If 2**num_bits_per_symbol is not 32 or 128.
        """
        M = 2**self.num_bits_per_symbol
        if M == 32:
            # 32-QAM: Subset of 6x6 (36 points) - remove 4 corners.
            # Axis levels: -5, -3, -1, 1, 3, 5.
            axis = np.array([-5, -3, -1, 1, 3, 5])
            xv, yv = np.meshgrid(axis, axis)
            points = xv + 1j * yv
            points = points.flatten()
            # Remove corners: |I| > 3 AND |Q| > 3 selects exactly (±5, ±5),
            # i.e. 4 points, leaving 36 - 4 = 32.
            mask = (np.abs(points.real) > 3) & (np.abs(points.imag) > 3)
            constellation = points[~mask]
        elif M == 128:
            # 128-QAM: Subset of 12x12 (144 points) - remove 16 points (4 from each corner).
            # Axis levels: -11, -9, ..., 9, 11 (12 values).
            axis = np.arange(-11, 12, 2)
            xv, yv = np.meshgrid(axis, axis)
            points = xv + 1j * yv
            points = points.flatten()
            # Corner patches: |I| >= 9 AND |Q| >= 9 selects levels {±9, ±11} on both
            # axes — a 2x2 patch per corner, 16 points total, leaving 144 - 16 = 128.
            mask = (np.abs(points.real) >= 9) & (np.abs(points.imag) >= 9)
            constellation = points[~mask]
        else:
            raise ValueError(f"Unsupported Cross-QAM order: {M}")
        if self.normalize:
            return self._normalize(constellation)
        return constellation

View File

@ -0,0 +1,159 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.mapping.pam_mapper import _PAMMapper
from ria_toolkit_oss.signal.block_generator.mapping.psk_mapper import _PSKMapper
from ria_toolkit_oss.signal.block_generator.mapping.qam_mapper import _QAMMapper
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class Mapper(ProcessBlock, RecordableBlock):
    """
    A class to map input bits to constellation points using various modulation schemes.

    :param constellation_type: The type of constellation ('PSK', 'QAM', 'PAM'), case-insensitive.
    :type constellation_type: str
    :param num_bits_per_symbol: Number of bits per symbol.
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points, defaults to True.
    :type normalize: bool, optional

    Methods:
    --------
    __call__(samples: list of np.ndarray) -> np.ndarray:
        Maps input bits to constellation points.
    show_constellation():
        Displays the constellation diagram.

    Example:
    --------
    # Create a QAM Mapper
    >>> qam_mapper = Mapper('QAM', 4, True)

    # Generate some random bits (a single 1-D bit array, wrapped in a list)
    >>> bits = np.random.randint(0, 2, 8)

    # Map bits to QAM constellation points
    >>> mapped_points = qam_mapper([bits])

    # Show the constellation diagram
    >>> qam_mapper.show_constellation()
    """

    def __init__(
        self,
        constellation_type: Optional[str] = "psk",
        num_bits_per_symbol: Optional[int] = 2,
        normalize: Optional[bool] = True,
        use_gray_code: Optional[bool] = True,
    ):
        """
        Initialize a mapper block to map bits to constellation symbols.

        :param constellation_type: The type of constellation ('PSK', 'QAM', 'PAM'),
            case-insensitive, defaults to "psk".
        :type constellation_type: str, optional
        :param num_bits_per_symbol: Number of bits per symbol, defaults to 2.
        :type num_bits_per_symbol: int, optional
        :param normalize: Whether to normalize the constellation points, defaults to True.
        :type normalize: bool, optional
        :param use_gray_code: Whether to gray-code the bit-to-symbol mapping, defaults to True.
        :type use_gray_code: bool, optional
        """
        self.constellation_type = constellation_type
        self.num_bits_per_symbol = num_bits_per_symbol
        self.normalize = normalize
        self.use_gray_code = use_gray_code
        self.constellation_mapper = self._create_constellation_mapper()
        # ProcessBlock.__init__ initializes the upstream-input block list.
        super().__init__()

    @property
    def input_type(self) -> DataType:
        """
        Get the input data type.

        :return: The accepted input data types, one list entry per input.
        :rtype: list of DataType
        """
        return [DataType.BITS]

    @property
    def output_type(self) -> DataType:
        """
        Get the output data type.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.SYMBOLS

    def _create_constellation_mapper(self):
        """
        Factory method to create the appropriate constellation mapper based on the type specified.

        :return: An instance of a specific constellation mapper.
        :rtype: ConstellationMapper
        :raises ValueError: If the constellation type is unsupported.
        """
        if self.constellation_type.upper() == "PSK":
            return _PSKMapper(self.num_bits_per_symbol, self.normalize, self.use_gray_code)
        elif self.constellation_type.upper() == "QAM":
            return _QAMMapper(self.num_bits_per_symbol, self.normalize, self.use_gray_code)
        elif self.constellation_type.upper() == "PAM":
            return _PAMMapper(self.num_bits_per_symbol, self.normalize, self.use_gray_code)
        else:
            raise ValueError("Unsupported constellation type")

    def get_constellation(self) -> np.ndarray:
        """
        Get the constellation points.

        :return: A numpy array of constellation points.
        :rtype: np.ndarray
        """
        return self.constellation_mapper.constellation

    def get_bit_mapping(self) -> np.ndarray:
        """
        Get the bit mapping.

        :return: A numpy array of symbol to bit mapping
        :rtype: np.ndarray
        """
        return self.constellation_mapper.bit_mapping

    def get_samples(self, num_samples: int):
        """
        Get num_samples samples from this block by recursively requesting samples from upstream blocks.

        :param num_samples: The number of samples (symbols) to output.
        :type num_samples: int

        Note: If a new block implementation decimates or multiplies the number of samples from upstream blocks
        this method must be overridden to implement the correct sample requests from input blocks.
        """
        # Each output symbol consumes num_bits_per_symbol input bits, so request
        # proportionally more samples from each upstream block.
        input_signals = [input.get_samples(num_samples * self.num_bits_per_symbol) for input in self.input]
        output = self.__call__(samples=input_signals)
        if len(output) != num_samples:
            raise ValueError(
                f"Error in block {self.__class__.__name__}: requested {num_samples} samples but got {len(output)}."
            )
        return output

    def __call__(self, samples):
        """
        Convert an array of bits into symbols.

        :param samples: A list containing a single 1-D array of bits; only samples[0] is used.
        :type samples: list of np.array
        :returns: Output symbols (complex-valued constellation points).
        :rtype: np.array"""
        # The underlying mapper expects a (batch, num_bits) array; wrap the single
        # bit array into a one-row batch and unwrap the result.
        return self.constellation_mapper(np.array([samples[0]]))[0]

    def show_constellation(self) -> None:
        """
        Display the constellation diagram.

        :return: None
        """
        self.constellation_mapper.show_constellation()

View File

@ -0,0 +1,46 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.mapping.constellation_mapper import (
ConstellationMapper,
)
class _PAMMapper(ConstellationMapper):
    """
    A class to map input bits to Pulse Amplitude Modulation (PAM) constellation points.

    Unlike square QAM, M-PAM is well-defined for any order M = 2**num_bits_per_symbol,
    so both odd and even bit counts are accepted (e.g. 3 bits -> 8-PAM). The previous
    even-number restriction was a square-QAM constraint that does not apply to PAM.

    :param num_bits_per_symbol: Number of bits per symbol. Must be a positive integer.
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points, defaults to True.
    :type normalize: bool, optional
    :param use_gray_code: Whether to use gray code as constellation points, defaults to True.
    :type use_gray_code: bool, optional
    :raises ValueError: If num_bits_per_symbol is not a positive integer.
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        if num_bits_per_symbol < 1:
            raise ValueError("num_bits_per_symbol must be a positive integer")
        super().__init__(num_bits_per_symbol, normalize, use_gray_code)
        self.constellation = self._generate_constellation()
        if self.use_gray_code:
            # Reorder points so array index i corresponds to Gray-coded value i
            # (the base class produced the forward Gray map in self.bit_mapping).
            self._reorder_for_gray()

    def _generate_constellation(self) -> np.ndarray:
        """
        Generate the PAM constellation points.

        M-PAM amplitude levels are the odd integers -M+1, -M+3, ..., M-1 (M points,
        spacing 2), represented on the real axis as complex numbers.

        :returns: The PAM constellation points.
        :rtype: numpy array
        """
        num_pam_symbols = 2**self.num_bits_per_symbol
        constellation = np.arange(-num_pam_symbols + 1, num_pam_symbols, 2).astype(np.complex128)
        if self.normalize:
            return self._normalize(constellation)
        return constellation

View File

@ -0,0 +1,49 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.mapping.constellation_mapper import (
ConstellationMapper,
)
class _PSKMapper(ConstellationMapper):
    """
    A class to map input bits to Phase Shift Keying (PSK) constellation points.

    :param num_bits_per_symbol: Number of bits per symbol. Any positive integer is
        accepted — no even-number restriction is enforced for PSK.
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points, defaults to True.
    :type normalize: bool, optional
    :param use_gray_code: Whether to use gray code as constellation points, defaults to True.
    :type use_gray_code: bool, optional
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        super().__init__(num_bits_per_symbol, normalize, use_gray_code)
        self.constellation = self._generate_constellation()
        if self.use_gray_code:
            # Reorder points so array index i corresponds to Gray-coded value i.
            self._reorder_for_gray()

    def _generate_constellation(self) -> np.ndarray:
        """
        Generate the PSK constellation points.

        :returns: The PSK constellation points.
        :rtype: numpy array
        """
        num_symbols = 2**self.num_bits_per_symbol
        # Indices start at 1, so the first point sits at angle 2*pi/M rather than 0.
        symbol_indices = np.arange(0, num_symbols) + 1
        real_part = np.cos(2 * np.pi * symbol_indices / num_symbols)
        image_part = np.sin(2 * np.pi * symbol_indices / num_symbols)
        constellation = real_part + 1j * image_part
        if self.num_bits_per_symbol == 2:
            constellation *= np.exp(1j * np.pi / 4)  # rotate 45 degrees (diagonal QPSK)
        if self.normalize:
            # Unit-circle points already have average energy 1; kept for consistency.
            return self._normalize(constellation)
        return constellation

View File

@ -0,0 +1,119 @@
from typing import Optional, Tuple
import numpy as np
from ria_toolkit_oss.signal.block_generator.mapping.constellation_mapper import (
ConstellationMapper,
)
QAM16_GRAY_CODE = np.array([0, 1, 3, 2, 4, 5, 7, 6, 12, 13, 15, 14, 8, 9, 11, 10])
class _QAMMapper(ConstellationMapper):
    """
    A class to map input bits to Quadrature Amplitude Modulation (QAM) constellation points.

    :param num_bits_per_symbol: Number of bits per symbol. Must be an even number
        greater than two (i.e. 16QAM and above — the gray-code construction recurses
        down to a 16QAM base case, so 4QAM is unsupported; use PSK for QPSK).
    :type num_bits_per_symbol: int
    :param normalize: Whether to normalize the constellation points, defaults to True.
    :type normalize: bool, optional
    :param use_gray_code: Whether to use gray code as constellation points, defaults to True.
    :type use_gray_code: bool, optional
    :raises ValueError: If num_bits_per_symbol is odd, or is not more than two.
    """

    def __init__(
        self, num_bits_per_symbol: int, normalize: Optional[bool] = True, use_gray_code: Optional[bool] = True
    ):
        if num_bits_per_symbol % 2 != 0:
            raise ValueError("num_bits_per_symbol must be an even number")
        elif num_bits_per_symbol <= 2:
            # The recursive gray construction bottoms out at 16QAM (4 bits).
            raise ValueError("num_bits_per_symbol must be more than two")
        # Build the natural-binary constellation first (use_gray_code=False in the
        # base class); gray handling is applied explicitly below.
        super().__init__(num_bits_per_symbol, normalize, False)
        self.constellation = self._generate_constellation()
        self.use_gray_code = use_gray_code
        if self.use_gray_code:
            self.constellation, self.bit_mapping, _ = self._generate_gray_code(num_bits_per_symbol)
            self._reorder_for_gray()

    @staticmethod
    def _generate_indexing_scheme(n: int) -> np.ndarray:
        """
        Build an n x n matrix of point indices that enumerates the grid quadrant by
        quadrant (bottom-left, top-left, top-right, bottom-right), matching the
        column-major ordering produced by _generate_constellation.

        :param n: Side length of the square grid (sqrt of the modulation order).
        :return: Integer index matrix of shape (n, n).
        """
        # Create an empty n x n matrix to store the result
        matrix = np.full((n, n), np.nan)
        index = 0
        # Fill 1st quadrant (bottom-left), but in reverse (flip up-down)
        for col in range(n // 2):
            for row in range(n // 2 - 1, -1, -1):
                matrix[n // 2 + row, col] = index
                index += 1
        # Fill 2nd quadrant (top-left)
        for col in range(n // 2):
            for row in range(n // 2):
                matrix[n // 2 - 1 - row, col] = index
                index += 1
        # Fill 3rd quadrant (top-right)
        for col in range(n // 2, n):
            for row in range(n // 2):
                matrix[n // 2 - 1 - row, col] = index
                index += 1
        # Fill 4th quadrant (bottom-right), but in reverse (flip up-down)
        for col in range(n // 2, n):
            for row in range(n // 2 - 1, -1, -1):
                matrix[n // 2 + row, col] = index
                index += 1
        return matrix.astype(np.int32)

    def _generate_gray_code(self, num_bits_per_symbol: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Recursively generate Gray code for higher-order QAM constellations. Base case is 16QAM.

        Each recursion step tiles four gray-coded quadrants of the next-lower order,
        offset so adjacent quadrants differ in the two new (most significant) bits.

        :param num_bits_per_symbol: Number of bits for the QAM constellation
        :return: Tuple of numpy arrays (constellation, bit_mapping and ref_bit_mapping)
        """
        if num_bits_per_symbol == 4:
            return self.constellation, QAM16_GRAY_CODE, QAM16_GRAY_CODE
        _, _, lower_mod_gray_code = self._generate_gray_code(num_bits_per_symbol - 2)
        grid_len = int(np.sqrt(2 ** (num_bits_per_symbol - 2)))
        lower_mod_gray_code = np.flipud(lower_mod_gray_code.reshape(grid_len, grid_len).T)
        # Generate quadrants: the same lower-order pattern offset by the four
        # possible values of the two new most significant bits.
        quadrants = [
            lower_mod_gray_code,
            lower_mod_gray_code + 2 ** (num_bits_per_symbol - 2),
            lower_mod_gray_code + 3 * 2 ** (num_bits_per_symbol - 2),
            lower_mod_gray_code + 2 ** (num_bits_per_symbol - 1),
        ]
        # Combine quadrants; mirrored tiling keeps adjacent cells gray-adjacent.
        left_side = np.vstack((np.flipud(quadrants[1]), quadrants[0]))
        right_side = np.vstack((np.flipud(np.fliplr(quadrants[2])), np.fliplr(quadrants[3])))
        ref_bit_mapping = np.hstack((left_side, right_side)).reshape(-1)
        # Apply indexing scheme to align grid positions with constellation ordering.
        indices = self._generate_indexing_scheme(int(np.sqrt(2**num_bits_per_symbol))).reshape(-1)
        constellation = self.constellation[indices]
        bit_mapping = ref_bit_mapping[indices]
        return constellation, bit_mapping, ref_bit_mapping

    def _generate_constellation(self) -> np.ndarray:
        """
        Generate the QAM constellation points.

        The square constellation is the Cartesian product of two M'-PAM level sets,
        where M' = 2**(num_bits_per_symbol / 2).

        :returns: The QAM constellation points.
        :rtype: numpy array
        """
        num_pam_symbols = 2 ** (self.num_bits_per_symbol // 2)
        pam_constellation = np.arange(-num_pam_symbols + 1, num_pam_symbols, 2)
        constellation = np.array(np.meshgrid(pam_constellation, pam_constellation)).T.reshape((-1, 2))
        constellation = constellation[:, 0] + 1j * constellation[:, 1]
        if self.normalize:
            return self._normalize(constellation)
        return constellation

View File

@ -0,0 +1,141 @@
from typing import Optional
import numpy as np
from scipy.special import logsumexp
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class SymbolDemapper(RecordableBlock, ProcessBlock):
    """
    A class to map received symbols back to their most likely symbols from a predefined constellation
    using Maximum Likelihood Detection.

    :param constellation: The array of constellation points.
    :type constellation: np.ndarray
    :param bit_mapping: Map from constellation index to the decimal value of its bit pattern.
    :type bit_mapping: np.ndarray
    :param no: The noise power spectral density, defaults to 1e-6.
    :type no: float, optional
    :param prior: The prior probabilities of the constellation points (log-domain additive
        terms), defaults to None (uniform prior, i.e. all zeros).
    :type prior: np.ndarray, optional
    :param bits_out: Whether to return bits or symbols, defaults to True.
    :type bits_out: bool, optional
    :param llrs_out: Whether to return per-bit log-likelihood ratios instead of hard
        decisions (takes precedence over bits_out), defaults to False.
    :type llrs_out: bool, optional
    :param gray_code: If True, invert the supplied bit_mapping (via argsort) before use,
        defaults to False.
    :type gray_code: bool, optional

    Methods:
    --------
    __call__(rx_symbols: np.ndarray) -> np.ndarray:
        Maps received symbols to their nearest constellation points based on the maximum likelihood estimation.
    """

    def __init__(
        self,
        constellation: np.ndarray,
        bit_mapping: np.ndarray,
        no: Optional[float] = 1e-6,
        prior: Optional[np.ndarray] = None,
        bits_out: Optional[bool] = True,
        llrs_out: Optional[bool] = False,
        gray_code: Optional[bool] = False,
    ):
        self.constellation = constellation
        self.bits_out = bits_out
        self.llrs_out = llrs_out
        if gray_code:
            # argsort inverts the forward map supplied by the mapper.
            self.bit_mapping = np.argsort(bit_mapping)
        else:
            self.bit_mapping = bit_mapping
        if prior is not None:
            self.prior = prior
        else:
            # Uniform prior: zero log-domain bias for every constellation point.
            self.prior = np.zeros((len(constellation),))
        self.no = no

    @property
    def input_type(self) -> DataType:
        """
        Get the input data type for the SymbolDemapper.

        :return: The accepted input data types, one list entry per input.
        :rtype: list of DataType
        """
        return [DataType.SOFT_SYMBOLS]

    @property
    def output_type(self) -> DataType:
        """
        Get the output data type for the SymbolDemapper.

        :return: The output data type.
        :rtype: DataType
        """
        if self.bits_out:
            return DataType.BITS
        else:
            return DataType.SYMBOLS

    def _decimal_to_bits(self, decimal_arr: np.ndarray) -> np.ndarray:
        """
        Convert an array of decimal values to their binary representations.

        :param decimal_arr: 2D array of decimal values to be converted
        :type decimal_arr: numpy array
        :return: 2D array of binary representations (MSB first, bits flattened per sample)
        :rtype: numpy array
        """
        num_bits_per_symbol = int(np.log2(len(self.constellation)))
        num_samples, num_symbols = decimal_arr.shape
        # Vectorized conversion of decimal to binary
        binary_arr = ((decimal_arr[:, :, np.newaxis] & (1 << np.arange(num_bits_per_symbol)[::-1])) > 0).astype(int)
        # Reshape to flatten the bits for each sample
        return binary_arr.reshape(num_samples, -1)

    def get_samples(self, num_samples):
        # NOTE(review): `process` is not defined on this class; presumably it is
        # provided by ProcessBlock — verify. The sibling Mapper block calls
        # self.__call__ here instead, which suggests this may be inconsistent.
        samples = self.input[0].get_samples(num_samples)
        return self.process(rx_symbols=samples)

    def __call__(self, rx_symbols: np.ndarray) -> np.ndarray:
        """
        Maps received symbols to their nearest constellation points based on the maximum likelihood estimation.

        :param rx_symbols: The received symbols to be demapped, shape (batch, num_symbols).
        :type rx_symbols: np.ndarray
        :return: LLRs (if llrs_out), demapped bits (if bits_out), or the nearest
            constellation points otherwise.
        :rtype: numpy array
        """
        # Broadcast every received symbol against every constellation point:
        # resulting shape (batch, num_symbols, M).
        rx_symbols_extended = np.tile(
            rx_symbols.reshape((rx_symbols.shape[0], rx_symbols.shape[1], 1)), (1, 1, len(self.constellation))
        )
        constellation_extended = self.constellation.reshape((1, 1, -1))
        prior_extended = self.prior.reshape((1, 1, -1))
        # Gaussian log-likelihood (up to a constant): -|y - s|^2 / N0 + log-prior.
        minus_dist = -np.abs(rx_symbols_extended - constellation_extended) ** 2 / self.no + prior_extended
        if self.llrs_out:
            batches, num_symbols = rx_symbols.shape
            bits_per_sym = int(np.log2(len(self.constellation)))
            bit_mapping = np.asarray(self.bit_mapping, dtype=np.uint16)  # shape (M,)
            # bit_table[m, b]: bit b (MSB first) of the value mapped to point m.
            bit_table = ((bit_mapping[:, None] >> np.arange(bits_per_sym - 1, -1, -1)) & 1).astype(bool)
            neg_inf = -1e30  # effectively -inf without creating NaNs inside logsumexp
            llr = np.empty((batches, num_symbols, bits_per_sym), dtype=np.float32)
            for b in range(bits_per_sym):
                mask0 = ~bit_table[:, b]  # symbols where bit b == 0
                mask1 = bit_table[:, b]  # symbols where bit b == 1
                ll0 = np.where(mask0, minus_dist, neg_inf)  # (B,T,M)
                ll1 = np.where(mask1, minus_dist, neg_inf)
                # LLR = log P(bit=0 | y) - log P(bit=1 | y), marginalized over symbols.
                llr[..., b] = logsumexp(ll0, axis=-1) - logsumexp(ll1, axis=-1)
            return llr.reshape(batches, num_symbols * bits_per_sym)
        elif self.bits_out:
            # Hard decision: most likely point index, converted to its bit pattern.
            indices = np.argmax(minus_dist, axis=-1)
            return self._decimal_to_bits(self.bit_mapping[indices])
        else:
            # Hard decision on the symbol itself.
            indices = np.argmax(minus_dist, axis=-1)
            return self.constellation[indices]

View File

@ -0,0 +1,28 @@
"""
RIA Miscellaneous Signal Processing Blocks Module
This module provides auxiliary blocks for use in signal processing chains within the RIA block-based signal generator
framework.
Key components:
- Downsampling: Reduces the sampling rate of a signal
- Upsampling: Increases the sampling rate of a signal
Features:
- Integration with other RIA blocks
- Configurable parameters for flexible signal manipulation
- Essential utilities for common signal processing tasks
Usage:
- Import specific blocks to incorporate into your signal processing chain.
For detailed parameters and methods, see individual block documentation.
"""
from ria_toolkit_oss.signal.block_generator.multirate.downsampling import Downsampling
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
__all__ = ["Upsampling", "Downsampling"]

View File

@ -0,0 +1,63 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class Downsampling(Block):
    """
    A class to perform downsampling on input signals.

    :param factor: The downsampling factor.
    :type factor: int

    Methods:
    __call__(signal: np.ndarray, num_samples: Optional[int] = None, delay: Optional[int] = 0) -> np.ndarray:
        Downsamples the input signal by the specified factor along the last axis.
    """

    def __init__(self, factor: int):
        self.factor = factor

    def __call__(self, signal: np.ndarray, num_samples: Optional[int] = None, delay: Optional[int] = 0) -> np.ndarray:
        """
        Downsample the input signal by keeping every ``factor``-th sample along the last axis.

        Previously ``num_samples`` had no default and the docstring's advertised
        sentinel ``-1`` produced an empty slice (``delay : delay - factor``); any
        non-positive value (or ``None``) now means "return all available samples",
        which also preserves the old falsy-zero behavior.

        :param signal: The input signal to be downsampled, shape (batch, time).
        :type signal: numpy array
        :param num_samples: The number of samples to return after downsampling;
            ``None`` or a non-positive value returns all samples, defaults to None.
        :type num_samples: int, optional
        :param delay: The sample offset at which downsampling starts, defaults to 0.
        :type delay: int, optional
        :return: The downsampled signal.
        :rtype: numpy array
        """
        if num_samples is None or num_samples <= 0:
            return signal[:, delay :: self.factor]
        return signal[:, delay : delay + self.factor * num_samples : self.factor]

    @property
    def input_type(self) -> DataType:
        """
        Get the input data type for the downsampling operation.

        :return: The input data type.
        :rtype: DataType
        """
        return DataType.BASEBAND_SIGNAL

    @property
    def output_type(self) -> DataType:
        """
        Get the output data type for the downsampling operation.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.BASEBAND_SIGNAL

    def get_samples(self, num_samples):
        # Pull-based sample requests are not supported for this block.
        raise NotImplementedError

View File

@ -0,0 +1,69 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class Upsampling(Block):
    """
    A class to perform upsampling on input signals.

    :param factor: The upsampling factor.
    :type factor: int

    Methods:
    __call__(signal: np.ndarray) -> np.ndarray:
        Upsamples the input signal by inserting factor - 1 zeros after each sample
        along the last axis.

    Example:
    --------
    # Create an Upsampling instance with a factor of 3
    >>> upsampler = Upsampling(3)

    # Original signal
    >>> signal = np.array([[1, 2], [3, 4]])

    # Perform upsampling
    >>> upsampled_signal = upsampler(signal)
    >>> print(upsampled_signal)
    array([[1, 0, 0, 2, 0, 0],
           [3, 0, 0, 4, 0, 0]])
    """

    def __init__(self, factor: int):
        self.factor = factor

    @property
    def input_type(self) -> DataType:
        """Get the input data type for the upsampling operation.

        :return: The input data type.
        :rtype: DataType
        """
        return DataType.SYMBOLS

    @property
    def output_type(self) -> DataType:
        """Get the output data type for the upsampling operation.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.UPSAMPLED_SYMBOLS

    def get_samples(self, num_samples):
        # Pull-based sample requests are not supported for this block.
        raise NotImplementedError

    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """Upsample the input signal by inserting zeros between samples.

        :param signal: The input signal to be upsampled. Shape should be (n_samples, n_bits).
        :type signal: numpy array
        :return: The upsampled signal. Shape will be (n_samples, n_bits * factor).
        :rtype: numpy array
        """
        n_samples, n_bits = signal.shape
        # Allocate the zero-filled output, then place the originals every factor-th slot.
        us_signal = np.zeros((n_samples, n_bits * self.factor), dtype=signal.dtype)
        us_signal[:, :: self.factor] = signal
        return us_signal

View File

@ -0,0 +1,87 @@
from abc import ABC, abstractmethod
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
class ProcessBlock(Block, ABC):
    """
    Abstract base class for blocks that transform samples pulled from one or
    more upstream input blocks connected via :meth:`connect_input`.
    """

    def __init__(self):
        # Upstream blocks; populated by connect_input(). Empty until connected.
        self.input: list[Block] = []

    def _validate_input(self, input) -> None:
        """
        Validate input block formats.

        Must be a list of Block objects whose length matches ``self.input_type``.

        :raises ValueError: if block configuration is invalid.
        """
        if not isinstance(input, list):
            raise ValueError(
                f"Block '{self.__class__.__name__}' input must be a list of block objects but was {type(input)}."
            )
        elif not all(isinstance(item, Block) for item in input):
            raise ValueError(
                f"Invalid input to block '{self.__class__.__name__}'. \
                Expected a list of Block objects but got \
                {'[' + ',' .join(f'{item.__class__.__name__}({repr(item)})' for item in input) + ']'}"
            )
        elif len(input) != len(self.input_type):
            # input_type is the abstract property below; its length defines the arity.
            raise ValueError(
                f"Block '{self.__class__.__name__}' requires {len(self.input_type)} input but got {len(input)}"
            )

    def connect_input(self, input: list[Block]) -> None:
        """
        Declare the input block(s) for this block.

        :param input: Input blocks.
        :type input: list of Block objects.
        :raises ValueError: If the input list fails validation (see :meth:`_validate_input`).
        """
        self._validate_input(input)
        self.input = input

    @property
    @abstractmethod
    def input_type(self) -> list[DataType]:
        """
        Get the input data types for the block.

        :return: The data type of each input.
        :rtype: list[DataType]
        """
        pass

    @abstractmethod
    def __call__(self, samples: list[np.array]):
        """
        Process input samples and return output samples.

        :param samples: A list of n input arrays, where length and datatypes are defined by block.input_type.
        :type samples: list of np.array
        :returns: The processed output array, where datatype is defined by block.output_type.
        :rtype: np.array"""
        pass

    def get_samples(self, num_samples: int):
        """
        Get num_samples samples from this block by recursively requesting samples from upstream blocks.

        :param num_samples: The number of samples to output.
        :type num_samples: int
        :returns: The processed output samples.
        :raises ValueError: If processing changed the sample count unexpectedly.

        Note: If a new block implementation decimates or multiplies the number of samples from upstream blocks
        this method must be overridden to implement the correct sample requests from input blocks.
        """
        # Pull the same number of samples from every upstream block, then process.
        input_signals = [input.get_samples(num_samples) for input in self.input]
        output = self.__call__(samples=input_signals)
        # 1:1 rate assumption: the output length must match the request.
        if len(output) != num_samples:
            raise ValueError(
                f"Error in block {self.__class__.__name__}: requested {num_samples} samples but got {len(output)}."
            )
        return output

View File

@ -0,0 +1,52 @@
"""
A set of blocks to pulse shape a modulated signal.
Pulse shaping is a signal processing technique
used in digital communications to modify the waveform
of transmitted pulses to improve efficiency and reduce
interference.
It helps control the bandwidth of the
transmitted signal and minimizes intersymbol
interference (ISI), which occurs when overlapping
pulses cause errors in symbol detection.
Common filters include Sinc, Raised Cosine and Root Raised Cosine.
Filters are applied to upsampled signal, which consists of
each input symbol followed by n-1 0 samples, where n is the
upsampling factor.
Example Usage:
>>> from ria_toolkit_oss.signal.block_generator import BinarySource, Mapper, Upsampling, RaisedCosineFilter
>>> # create digital modulation symbols
>>> source = BinarySource()
>>> mapper = Mapper(constellation_type='psk', num_bits_per_symbol=2)
>>> mapper.connect_input([source])
>>> # pulse shape the symbols
>>> upsampling_factor = 4
>>> upsampler = Upsampling(factor = upsampling_factor)
>>> upsampler.connect_input([mapper])
>>> filter = RaisedCosineFilter(span_in_symbols=100, upsampling_factor=upsampling_factor, beta=0.1)
>>> filter.connect_input([upsampler])
>>> filter.record(num_samples = 10000)
"""
from .gaussian_filter import GaussianFilter
from .pulse_shaping_filter import PulseShapingFilter
from .raised_cosine_filter import RaisedCosineFilter
from .rect_filter import RectFilter
from .root_raised_cosine_filter import RootRaisedCosineFilter
from .sinc_filter import SincFilter
from .upsampling import Upsampling
# Public interface of the pulse-shaping subpackage.
__all__ = [
    "PulseShapingFilter",
    "GaussianFilter",
    "RaisedCosineFilter",
    "RootRaisedCosineFilter",
    "RectFilter",
    "SincFilter",
    "Upsampling",
]

View File

@ -0,0 +1,95 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
class GaussianFilter(PulseShapingFilter):
    r"""
    Gaussian pulse-shaping filter, as used in GMSK.

    The continuous-time impulse response is

    .. math::
        h(t) = \frac{1}{\sqrt{2\pi}\,\sigma} \exp\!\Bigl(-\frac{t^2}{2\,\sigma^2}\Bigr),

    where :math:`\sigma` is derived from the bandwidth-time product (BT). With
    the 3 dB bandwidth convention and symbol period :math:`T = 1`,

    .. math::
        \sigma = \frac{\sqrt{\ln(2)}}{2\,\pi\,BT}.

    The response is sampled over ``span_in_symbols`` symbols at
    ``upsampling_factor`` samples per symbol. When ``normalize=True`` the taps
    are rescaled by the base class's :meth:`_normalize_weights`.

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: The number of samples per symbol.
    :type upsampling_factor: int
    :param bt: The bandwidth-time product, a key parameter for Gaussian filters.
    :type bt: float
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(self, span_in_symbols: int, upsampling_factor: int, bt: float, normalize: Optional[bool] = True):
        self.bt = bt
        # An odd tap count keeps the impulse response symmetric about t = 0;
        # OR-ing with 1 bumps even counts up by one and leaves odd counts alone.
        tap_count = span_in_symbols * upsampling_factor
        tap_count |= 1
        coefficients = self._generate_weights(tap_count, upsampling_factor)
        super().__init__(span_in_symbols, upsampling_factor, coefficients, normalize)

    def _generate_weights(self, num_taps, upsampling_factor) -> np.ndarray:
        r"""
        Sample the Gaussian impulse response for GMSK.

        With :math:`\sigma = \sqrt{\ln 2} / (2\pi BT)` and a symmetric axis
        :math:`t = n / \text{upsampling\_factor}` over ``num_taps`` points,
        returns :math:`h[n] = \frac{1}{\sqrt{2\pi}\sigma} e^{-t^2/(2\sigma^2)}`.

        :param num_taps: Total (odd) number of filter taps.
        :param upsampling_factor: Samples per symbol.
        :return: A 1D numpy array of Gaussian filter taps.
        :rtype: np.ndarray
        """
        # Sigma follows from the bandwidth-time product (BT).
        sigma = np.sqrt(np.log(2)) / (2 * np.pi * self.bt)
        # Symmetric integer sample axis, converted to symbol durations.
        half_span = num_taps // 2
        sample_indices = np.arange(-half_span, half_span + 1)
        time_axis = sample_indices / upsampling_factor
        return 1.0 / (np.sqrt(2.0 * np.pi) * sigma) * np.exp(-0.5 * (time_axis / sigma) ** 2)

    def __str__(self) -> str:
        """
        Return a human-readable description of this filter.

        :return: A string describing the GaussianFilter with its parameters.
        :rtype: str
        """
        return (
            f"GaussianFilter(span_in_symbols={self.span_in_symbols}, "
            f"upsampling_factor={self.upsampling_factor}, bt={self.bt})"
        )

View File

@ -0,0 +1,200 @@
import os
from datetime import datetime
from typing import List, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as ss
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class PulseShapingFilter(ProcessBlock, RecordableBlock):
    """
    Pulse Shaping Block

    Applies a pulse shaping filter to an upsampled signal via FFT-based
    convolution. Subclasses either pass explicit ``weights`` or generate
    their own after calling this initializer.

    Input Type: UPSAMPLED_SYMBOLS
    Output Type: BASEBAND_SIGNAL

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: Number of samples per symbol.
    :type upsampling_factor: int
    :param weights: The filter coefficients, defaults to None.
    :type weights: np.ndarray | None
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(
        self,
        span_in_symbols: Optional[int] = 100,
        upsampling_factor: Optional[int] = 4,
        weights: Optional[np.ndarray] = None,
        normalize: Optional[bool] = True,
    ):
        self.span_in_symbols = span_in_symbols
        self.upsampling_factor = upsampling_factor
        # Filter taps; None until set here or by a subclass after super().__init__.
        self.weights: Optional[np.ndarray] = weights
        # num_taps mirrors len(weights); subclasses that synthesize weights set both later.
        self.num_taps: Optional[int] = len(self.weights) if self.weights is not None else None
        if normalize:
            self._normalize_weights()
        super().__init__()

    @property
    def input_type(self) -> list[DataType]:
        """
        Get the input data type for the filter.

        :return: The input data types (a single UPSAMPLED_SYMBOLS input).
        :rtype: list[DataType]
        """
        return [DataType.UPSAMPLED_SYMBOLS]

    @property
    def output_type(self) -> DataType:
        """
        Get the output data type for the filter.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.BASEBAND_SIGNAL

    def __str__(self) -> str:
        """
        Return a string representation of the PulseShapingFilter.

        :return: A string describing the filter's parameters.
        :rtype: str
        """
        return f"CustomFilter(span_in_symbols={self.span_in_symbols}, " f"upsampling_factor={self.upsampling_factor})"

    def _normalize_weights(self) -> None:
        """
        Normalize the filter weights to unit energy (L2 norm of 1).
        No-op when weights have not been set yet.
        """
        if self.weights is not None:
            self.weights /= np.sqrt(np.sum(np.abs(self.weights) ** 2))

    def _pad_signals(self, signal: np.ndarray, padding_axis: int = -1) -> Tuple[np.ndarray, np.ndarray]:
        """
        Zero-pad the upsampled signal and the weights to a common length.

        :param signal: The signal to be padded, shape (channels, samples).
        :type signal: np.ndarray
        :param padding_axis: The axis along which to perform the padding.
        :type padding_axis: int
        :return: The padded signal and weights as a tuple of numpy arrays; the
            weights are tiled to one row per signal channel.
        :rtype: tuple of np.ndarray
        """
        # Weights are a 1D tap array here; tiled to 2D at the end.
        weights = self.weights
        # Common target length for both convolution operands.
        max_len = max(weights.shape[0], signal.shape[1])
        # Pad the upsampled signal to the maximum length.
        # NOTE(review): np.zeros(pad_width) is passed a list of (before, after)
        # tuples, which is not a valid shape — np.pad(signal, pad_width) was
        # likely intended. This branch only runs when the weights are longer
        # than the signal; verify before relying on it.
        if signal.shape[1] < max_len:
            pad_width: List[Tuple[int, int]] = [(0, 0)] * signal.ndim
            pad_width[padding_axis] = (0, max_len - signal.shape[1])
            signal_padded = np.concatenate((signal, np.zeros(pad_width, dtype=signal.dtype)), axis=padding_axis)
        else:
            signal_padded = signal
        # Pad the weights if they are smaller than the signal.
        if weights.shape[0] < max_len:
            weights_padded = np.concatenate((weights, np.zeros(max_len - weights.shape[0], weights.dtype)))
        else:
            weights_padded = weights
        # Replicate the padded taps across all signal channels.
        weights_padded = np.tile(weights_padded.reshape((1, -1)), (signal_padded.shape[0], 1))
        return signal_padded, weights_padded

    def _trim_output(self, signal: np.ndarray, input_length: int) -> np.ndarray:
        """
        Trim the filtered signal to the full-convolution length.

        :param signal: The filtered signal.
        :type signal: np.ndarray
        :param input_length: The length of the input signal.
        :type input_length: int
        :return: The trimmed signal (length = input_length + num_taps - 1).
        :rtype: np.ndarray
        """
        expected_length = input_length + self.num_taps - 1
        return signal[..., :expected_length]

    def __call__(self, samples):
        """
        Apply the filter to an upsampled signal using convolution and trim the output.

        :param samples: The signal to be filtered.
        :type samples: list of np.array, length = 1
        :return: The filtered and trimmed signal, same length as the input.
        :rtype: np.array
        """
        padding = "full"
        # Promote the single 1D input to a (1, N) batch for the padding helpers.
        upsampled_signal = np.array([samples[0]])
        upsampled_signal_padded, weights_padded = self._pad_signals(upsampled_signal, 1)
        filtered_signal = ss.fftconvolve(upsampled_signal_padded, weights_padded, mode=padding, axes=-1)
        # Trim to full-convolution length, then slice back to the input length
        # so the ProcessBlock.get_samples length check passes.
        return self._trim_output(filtered_signal, upsampled_signal.shape[-1])[0, : len(samples[0])]

    def apply_matched_filter(
        self, upsampled_signal: np.ndarray, padding: str = "full", padding_axis: int = 0
    ) -> np.ndarray:
        """
        Apply the matched filter to an upsampled signal using convolution and trim the output.

        :param upsampled_signal: The signal to be filtered.
        :type upsampled_signal: np.ndarray
        :param padding: The type of padding to use, defaults to 'full'. Options are 'full', 'same', 'valid'.
        :type padding: str
        :param padding_axis: The axis along which to perform the padding, defaults to 0.
        :type padding_axis: int
        :return: The filtered and trimmed signal.
        :rtype: np.ndarray
        """
        upsampled_signal_padded, weights_padded = self._pad_signals(upsampled_signal, padding_axis)
        # NOTE(review): weights_padded is 2D, so [::-1] reverses the channel
        # axis, not the taps. All rows are identical copies so this is harmless,
        # but the taps are NOT time-reversed — likely [..., ::-1] was intended.
        # This only matters for asymmetric filters; confirm before reuse.
        filtered_signal = ss.fftconvolve(upsampled_signal_padded, np.conj(weights_padded[::-1]), mode=padding, axes=-1)
        return self._trim_output(filtered_signal, upsampled_signal.shape[-1])

    def show(self) -> None:
        """
        Display the impulse response, phase response, and frequency response of
        the filter, and save the figure under ``images/``.
        """
        fft_size = 4096
        phase_response = np.angle(self.weights)
        # Magnitude spectrum, DC-centred via fftshift.
        freq_response = np.abs(np.fft.fftshift(np.fft.fft(self.weights, fft_size)))
        num_taps = self.num_taps
        fig, axs = plt.subplots(figsize=(10, 10), nrows=3, ncols=1)
        t_axis = np.linspace(-self.span_in_symbols // 2, self.span_in_symbols // 2, num_taps)
        f_axis = np.linspace(-fft_size // 2, fft_size // 2, fft_size)
        axs[0].plot(t_axis, self.weights, linewidth=3)
        axs[0].set_title("Impulse Response")
        axs[0].set_ylabel("Amplitude")
        axs[0].set_xlabel(r"Normalized time with respect to symbol duration $T_s$")
        axs[1].plot(t_axis, phase_response, linewidth=3)
        axs[1].set_title("Phase Response")
        axs[1].set_ylabel("Phase")
        axs[1].set_xlabel(r"Normalized time with respect to symbol duration $T_s$")
        # NOTE(review): freq_response is a magnitude, so amplitude-dB would
        # conventionally be 20*log10; 10*log10 here plots half-scale dB — confirm intent.
        axs[2].plot(f_axis, 10 * np.log10(freq_response), linewidth=3)
        axs[2].set_title("Frequency Response")
        axs[2].set_ylabel("Magnitude (dB)")
        axs[2].set_xlabel("Frequency bins")
        plt.tight_layout()
        # ToDo: this saving approach needs to change - not sure how yet :D
        os.makedirs("images", exist_ok=True)
        now = datetime.now()
        formatted_time = now.strftime("%Y%m%d_%H%M%S")
        file_name = f"images/impulse_response_{formatted_time}.png"
        fig.savefig(file_name, dpi=800)
        plt.show()

View File

@ -0,0 +1,111 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
class RaisedCosineFilter(PulseShapingFilter):
    r"""
    Raised Cosine Filter Block

    Applies a raised cosine filter to an upsampled signal.

    Input Type: UPSAMPLED_SYMBOLS
    Output Type: BASEBAND_SIGNAL

    The raised cosine filter is defined by the following equation:

    .. math::
        h(t) =
        \begin{cases}
        \frac{\pi}{4T} \text{sinc}\left(\frac{1}{2\beta}\right), & \text { if }t = \pm \frac{T}{2\beta}\\
        \frac{1}{T}\text{sinc}\left(\frac{t}{T}\right)\
        \frac{\cos\left(\frac{\pi\beta t}{T}\right)}{1-\left(\frac{2\beta t}{T}\right)^2}, & \text{otherwise}
        \end{cases}

    where :math:`\beta` is the roll-off factor and :math:`T` the symbol duration.

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: The number of samples per symbol.
    :type upsampling_factor: int
    :param beta: The roll-off factor of the raised cosine filter. Must be between 0 and 1.
    :type beta: float
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(
        self,
        span_in_symbols: Optional[int] = 100,
        upsampling_factor: Optional[int] = 4,
        beta: Optional[float] = 0.1,
        normalize: Optional[bool] = True,
    ):
        super().__init__(span_in_symbols, upsampling_factor, None, normalize)
        assert 0 < beta <= 1, "Beta must be between 0 and 1"
        self.beta = beta
        # OR-ing with 1 forces an odd tap count (symmetric impulse response);
        # even counts are bumped up by one, odd counts are unchanged.
        tap_count = self.span_in_symbols * self.upsampling_factor
        tap_count |= 1
        self.num_taps = tap_count
        self.weights = self._generate_weights()
        if normalize:
            self._normalize_weights()

    def _generate_weights(self) -> np.ndarray:
        """
        Build a symmetric time axis and evaluate the raised cosine on it.

        :return: The filter coefficients.
        :rtype: np.ndarray
        """
        half_span = self.num_taps // 2
        return self._raised_cosine(np.arange(-half_span, half_span + 1))

    def _raised_cosine(self, t: np.ndarray) -> np.ndarray:
        r"""
        Evaluate the raised cosine impulse response on time axis ``t`` (in
        samples), patching the removable singularity at
        :math:`t = \pm T/(2\beta)` with its closed-form limit.

        :param t: The time axis.
        :type t: np.ndarray
        :return: The raised cosine filter coefficients.
        :rtype: np.ndarray
        """
        sps = self.upsampling_factor
        beta = self.beta
        # The denominator vanishes at |t| = T/(2*beta); silence the warnings
        # and patch those taps afterwards.
        with np.errstate(divide="ignore", invalid="ignore"):
            taps = (
                1
                / sps
                * np.sinc(t / sps)
                * np.cos(np.pi * beta * t / sps)
                / (1 - (2 * beta * t / sps) ** 2)
            )
        singular = np.abs(np.abs(t) - (sps / (2 * beta))) < 1e-6
        if np.any(singular):
            taps[singular] = np.pi / (4 * sps) * np.sinc(1 / (2 * beta))
        return taps

    def __str__(self) -> str:
        """
        Return a human-readable description of this filter.

        :returns: A string containing the class name and its main parameters.
        :rtype: str
        """
        return (
            f"RaisedCosineFilter(span_in_symbols={self.span_in_symbols}, "
            f"upsampling_factor={self.upsampling_factor}, beta={self.beta})"
        )

View File

@ -0,0 +1,53 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
class RectFilter(PulseShapingFilter):
    r"""
    Rectangular (boxcar) pulse-shaping filter.

    Every tap has a constant amplitude of one over the filter span. When
    ``normalize`` is enabled, the base class's :meth:`_normalize_weights`
    rescales the taps.

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: The number of samples per symbol.
    :type upsampling_factor: int
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(self, span_in_symbols: int, upsampling_factor: int, normalize: Optional[bool] = True):
        # OR-ing with 1 forces an odd tap count (symmetric about the centre tap).
        tap_count = span_in_symbols * upsampling_factor
        tap_count |= 1
        coefficients = self._generate_weights(tap_count)
        super().__init__(span_in_symbols, upsampling_factor, coefficients, normalize)

    def _generate_weights(self, num_taps) -> np.ndarray:
        """
        Generate the constant-amplitude taps.

        :param num_taps: Total (odd) number of taps.
        :return: A 1D numpy array of ones of length ``num_taps``.
        :rtype: np.ndarray
        """
        return np.ones(num_taps)

    def __str__(self) -> str:
        """
        Return a human-readable description of this filter.

        :return: A string describing the RectFilter with its parameters.
        :rtype: str
        """
        return f"RectFilter(span_in_symbols={self.span_in_symbols}, upsampling_factor={self.upsampling_factor})"

View File

@ -0,0 +1,112 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
class RootRaisedCosineFilter(PulseShapingFilter):
    r"""
    Root Raised Cosine Filter Block

    Applies a root raised cosine filter to an upsampled signal.

    Input Type: UPSAMPLED_SYMBOLS
    Output Type: BASEBAND_SIGNAL

    The root-raised cosine filter is defined by the following equation:

    .. math::
        h(t) =
        \begin{cases}
        \frac{1}{T} \left(1 + \beta\left(\frac{4}{\pi}-1\right) \right), & \text{if } t = 0 \\
        \frac{\beta}{T\sqrt{2}} \left[ \left(1+\frac{2}{\pi}\right)\sin\left(\frac{\pi}{4\beta}\right) +
        \left(1-\frac{2}{\pi}\right)\cos\left(\frac{\pi}{4\beta}\right) \right], & \text{if } t = \pm\frac{T}{4\beta}\\
        \frac{1}{T} \frac{\sin\left(\pi\frac{t}{T}(1-\beta)\right) + 4\beta\frac{t}{T}\cos\left(\pi\frac{t}{T}
        (1+\beta)\right)}{\pi\frac{t}{T}\left(1-\left(4\beta\frac{t}{T}\right)^2\right)}, & \text{otherwise}
        \end{cases}

    where :math:`\beta` is the roll-off factor and :math:`T` the symbol duration.

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: The number of samples per symbol.
    :type upsampling_factor: int
    :param beta: The roll-off factor of the raised cosine filter. Must be between 0 and 1.
    :type beta: float
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(
        self,
        span_in_symbols: Optional[int] = 100,
        upsampling_factor: Optional[int] = 4,
        beta: Optional[float] = 0.1,
        normalize: Optional[bool] = True,
    ):
        super().__init__(span_in_symbols, upsampling_factor, None, normalize)
        assert 0 < beta <= 1, "Beta must be between 0 and 1"
        self.beta = beta
        # Keep the tap count odd so the impulse response is symmetric about t = 0.
        num_taps = self.span_in_symbols * self.upsampling_factor
        if num_taps % 2 == 0:
            num_taps += 1
        self.num_taps = num_taps
        self.weights = self._generate_weights()
        if normalize:
            self._normalize_weights()

    def _generate_weights(self) -> np.ndarray:
        """
        Build a symmetric time axis and evaluate the root raised cosine on it.

        :return: The filter coefficients.
        :rtype: np.ndarray
        """
        half = self.num_taps // 2
        t_axis = np.arange(-half, half + 1)
        return self._root_raised_cosine(t_axis)

    def _root_raised_cosine(self, t: np.ndarray) -> np.ndarray:
        """
        Calculate the root raised cosine filter coefficients for a given time axis.

        The singular points of the closed-form expression (t = 0 and
        t = ±T/(4·beta)) are patched with their analytic limits. Unlike the
        previous implementation, the caller's array is never mutated and no
        magic sentinel value is used.

        :param t: The time axis (in samples); not modified.
        :type t: np.ndarray
        :return: The root raised cosine filter coefficients.
        :rtype: np.ndarray
        """
        beta = self.beta
        t_symbol = self.upsampling_factor
        # alpha uses the original axis: alpha == 0 exactly at t == 0.
        alpha = 4 * beta * t / t_symbol
        # Mask the t == 0 singularity instead of writing a 1e9 sentinel into
        # the (caller-visible) array; any finite placeholder works since the
        # masked taps are overwritten below.
        zero_mask = t == 0
        t_safe = np.where(zero_mask, 1, t)
        with np.errstate(divide="ignore", invalid="ignore"):
            f_val = (
                np.sin(np.pi * t_safe / t_symbol * (1 - beta))
                + alpha * np.cos(np.pi * t_safe / t_symbol * (1 + beta))
            ) / (np.pi * t_safe * (1 - alpha**2))
        # Closed-form limit at t = 0.
        f_val[zero_mask] = (1 + beta * (4 / np.pi - 1)) / t_symbol
        # Closed-form limit at t = ±T/(4·beta), where the denominator vanishes.
        limit_mask = np.abs(np.abs(t) - (t_symbol / (4 * beta))) < 1e-6
        if np.any(limit_mask):
            f_val[limit_mask] = (beta / t_symbol / np.sqrt(2)) * (
                (1 + 2 / np.pi) * np.sin(np.pi / 4 / beta) + (1 - 2 / np.pi) * np.cos(np.pi / 4 / beta)
            )
        return f_val

    def __str__(self) -> str:
        """
        Return a string representation of the RootRaisedCosineFilter object.

        :return: A string describing the filter's parameters.
        :rtype: str
        """
        return (
            f"RootRaisedCosineFilter(span_in_symbols={self.span_in_symbols}, "
            f"upsampling_factor={self.upsampling_factor}, beta={self.beta})"
        )

View File

@ -0,0 +1,73 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.pulse_shaping.pulse_shaping_filter import (
PulseShapingFilter,
)
class SincFilter(PulseShapingFilter):
    r"""
    Sinc Filter Block

    Apply a sinc filter to an upsampled signal.

    Input Type: UPSAMPLED_SYMBOLS
    Output Type: BASEBAND_SIGNAL

    The sinc filter is defined by the following equation:

    .. math::
        h(t) = \frac{1}{T}\text{sinc}\left(\frac{t}{T}\right)

    where :math:`T` is the symbol duration.

    :param span_in_symbols: The span of the filter in terms of symbols.
    :type span_in_symbols: int
    :param upsampling_factor: The number of samples per symbol.
    :type upsampling_factor: int
    :param normalize: Whether to normalize the filter coefficients, defaults to True.
    :type normalize: bool, optional
    """

    def __init__(
        self,
        span_in_symbols: Optional[int] = 100,
        upsampling_factor: Optional[int] = 4,
        normalize: Optional[bool] = True,
    ):
        super().__init__(span_in_symbols, upsampling_factor, None, normalize)
        # OR-ing with 1 forces an odd tap count (symmetric impulse response).
        tap_count = self.span_in_symbols * self.upsampling_factor
        tap_count |= 1
        self.num_taps = tap_count
        self.weights = self._generate_weights()
        if normalize:
            self._normalize_weights()

    def _generate_weights(self) -> np.ndarray:
        """
        Sample the normalized sinc over a symmetric time axis in symbol durations.

        :return: The filter coefficients.
        :rtype: np.ndarray
        """
        half_span = self.num_taps // 2
        symbol_times = np.arange(-half_span, half_span + 1) / self.upsampling_factor
        return np.sinc(symbol_times)

    def __str__(self) -> str:
        """
        Return a human-readable description of this filter.

        :return: A string describing the SincFilter with its parameters.
        :rtype: str
        """
        return f"SincFilter(span_in_symbols={self.span_in_symbols}, " f"upsampling_factor={self.upsampling_factor})"

View File

@ -0,0 +1,75 @@
import math
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class Upsampling(ProcessBlock, RecordableBlock):
    """
    Upsampling Block

    Upsample the input signal. This means that each input symbol will be followed by n-1 0 samples,
    where n is the upsampling factor. This process is performed before a pulse shaping filter to convert
    symbols into IQ samples. Ensure that the upsampling factor of both the upsampler and the filter are the same.

    For example, if factor = 4:
    Input = [1,1,1,1]
    Output = [1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0]

    Input Type: SYMBOLS
    Output Type: UPSAMPLED_SYMBOLS

    :param factor: The upsampling factor.
    :type factor: int
    """

    def __init__(self, factor: Optional[int] = 4):
        # Initialize ProcessBlock state (self.input = []) so get_samples()
        # fails with a clear IndexError rather than AttributeError when called
        # before connect_input().
        super().__init__()
        self.factor = factor

    @property
    def input_type(self) -> list[DataType]:
        """Get the input data types for the upsampling operation.

        :return: The input data types (a single SYMBOLS input).
        :rtype: list[DataType]
        """
        return [DataType.SYMBOLS]

    @property
    def output_type(self) -> DataType:
        """Get the output data type for the upsampling operation.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.UPSAMPLED_SYMBOLS

    def get_samples(self, num_samples) -> np.ndarray:
        """Produce exactly ``num_samples`` upsampled output samples.

        Pulls ceil(num_samples / factor) symbols from the input block,
        upsamples them by zero insertion, and trims the result to the
        requested length.

        :param num_samples: The number of output samples to produce.
        :type num_samples: int
        :return: The upsampled signal of length ``num_samples``.
        :rtype: numpy array
        """
        symbols = self.input[0].get_samples(int(math.ceil(num_samples / self.factor)))
        # Trim the *output*, not the symbol stream: when num_samples is not a
        # multiple of factor, upsampling ceil(num_samples/factor) symbols
        # yields more than num_samples samples, which would break downstream
        # length checks (e.g. RecordableBlock.record).
        return self.__call__([symbols])[:num_samples]

    def __call__(self, samples):
        """
        Upsample an array of complex samples by zero insertion.

        :param samples: A list containing a single array of complex samples.
        :type samples: list of np.array
        :returns: Processed samples.
        :rtype: np.array"""
        signal = samples[0]
        us_signal = np.zeros(len(signal) * self.factor, dtype=signal.dtype)
        us_signal[:: self.factor] = signal
        return us_signal

View File

@ -0,0 +1,30 @@
from ria_toolkit_oss.datatypes import Recording
from ria_toolkit_oss.signal import Recordable
from ria_toolkit_oss.signal.block_generator.block import Block
class RecordableBlock(Block, Recordable):
    """
    Mixin for blocks whose generated output can be captured into a Recording.
    """

    def record(self, num_samples: int) -> Recording:
        """
        Create a Recording object (samples and metadata), num_samples long,
        generated by this block and all connected input blocks.

        Metadata includes all object parameters of all connected blocks.

        :param num_samples: The number of samples to record.
        :type num_samples: int
        :returns: A recording object.
        :rtype: :ref:`Recording <ria_toolkit_oss.data.Recording>`
        :raises ValueError: If input blocks have incompatible output and input datatypes.
        :raises ValueError: If the number of samples is incorrect.
        """
        samples = self.get_samples(num_samples)
        # Defensive check: a rate-changing block upstream with a buggy
        # get_samples override could return the wrong number of samples.
        if len(samples) != num_samples:
            raise ValueError(
                f"Error in block {self.__class__.__name__} record(). \
                Requested {num_samples} samples but got {len(samples)}"
            )
        # _get_metadata is provided elsewhere (Block/Recordable hierarchy);
        # presumably gathers the parameters of all connected blocks — see those definitions.
        metadata = self._get_metadata()
        return Recording(data=samples, metadata=metadata)

    # TODO: enforce that output_type is IQ_SAMPLES before allowing record().

View File

@ -0,0 +1,141 @@
import os
from datetime import datetime
import click
import numpy as np
from ria_toolkit_oss.datatypes.recording import Recording
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
from ria_toolkit_oss.signal.block_generator.pulse_shaping.raised_cosine_filter import (
RaisedCosineFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.root_raised_cosine_filter import (
RootRaisedCosineFilter,
)
from ria_toolkit_oss.signal.block_generator.pulse_shaping.sinc_filter import SincFilter
from ria_toolkit_oss.signal.block_generator.siso_channel.awgn_channel import AWGNChannel
from ria_toolkit_oss.signal.block_generator.siso_channel.flat_rayleigh import (
FlatRayleigh,
)
@click.command()
@click.option("--num_samples", default=10, help="Number of samples.")
@click.option("--num_bits", default=40096, help="Number of bits.")
@click.option("--num_bits_per_symbol", default=4, help="Number of bits per symbol.")
@click.option("--modulation_list", multiple=True, default=["QAM", "PSK", "PAM"], help="List of modulation schemes.")
@click.option(
    "--filter_type", default="RRC", type=click.Choice(["SINC", "RC", "RRC"], case_sensitive=False), help="Filter type."
)
@click.option("--span_in_symbols", default=6, help="Span in symbols.")
@click.option("--samples_per_symbol", default=8, help="Samples per symbol.")
@click.option("--beta", default=0.25, help="Roll-off factor for RC and RRC filters.")
@click.option(
    "--channel_type",
    default="Rayleigh",
    type=click.Choice(["Rayleigh", "AWGN"], case_sensitive=False),
    help="Channel type.",
)
@click.option("--path_gain", default=0, help="Path gain in dB for Rayleigh channel.")
@click.option("--noise_power", multiple=True, default=[1e-5, 1e-4, 1e-3], help="Noise power for the AWGN channel.")
@click.option("--verbose", is_flag=True, help="Enable verbose output.")
def generate_signal(
    num_samples,
    num_bits,
    num_bits_per_symbol,
    modulation_list,
    filter_type,
    span_in_symbols,
    samples_per_symbol,
    beta,
    channel_type,
    path_gain,
    noise_power,
    verbose,
):
    # CLI entry point: generate pulse-shaped, channel-impaired modulated
    # recordings for every (modulation, noise_power) combination and save each
    # one as a .npy file. (Comments instead of a docstring: click would expose
    # a docstring as the command's --help text.)
    # Timestamped output directory so repeated runs never collide.
    now = datetime.now()
    formatted_time = now.strftime("%Y%m%d_%H%M%S")
    os.makedirs("recordings", exist_ok=True)
    recordings_dir_name = os.path.join("recordings", f"recording_set_{formatted_time}")
    os.makedirs(recordings_dir_name)
    if verbose:
        click.echo(f"Output directory: {recordings_dir_name}")
        click.echo("Starting signal generation...")
    for modulation in modulation_list:
        if verbose:
            click.echo(f"Processing modulation: {modulation}")
        # Build the processing chain: mapper -> upsampler -> pulse-shaping filter.
        f = _choose_filter(filter_type, span_in_symbols, samples_per_symbol, beta)
        us = Upsampling(samples_per_symbol)
        if modulation in ["QAM", "PSK", "PAM"]:
            mapper = Mapper(modulation, num_bits_per_symbol, normalize=True)
        else:
            raise ValueError("modulation must be QAM, PSK or PAM")
        # Channel: fading (plus receiver noise) or receiver noise alone.
        if channel_type == "Rayleigh":
            chan = FlatRayleigh(path_gain)
            rx_noise = AWGNChannel()
        elif channel_type == "AWGN":
            chan = None
            rx_noise = AWGNChannel()
        else:
            raise ValueError("channel_type must be Rayleigh or AWGN")
        for no in noise_power:
            if verbose:
                click.echo(f"  Noise power: {np.round(10 * np.log10(no * 1000), 2)} dBm")
            # Metadata saved alongside every recording of this configuration.
            metadata = {
                "modulation": modulation,
                "channel_type": channel_type,
                "noise_power": no,
                "filter_type": filter_type,
                "span_in_symbols": span_in_symbols,
                "samples_per_symbol": samples_per_symbol,
                "roll_off_factor": beta,
            }
            if chan:
                metadata["path_gain_db"] = path_gain
            # Reconfigure the noise block in place for this noise level.
            rx_noise.var = no
            bits = np.random.randint(0, 2, (num_samples, num_bits))
            symbols = mapper(bits)
            sig = f(us(symbols))
            if chan:
                sig_chan = rx_noise(chan(sig))
            else:
                sig_chan = rx_noise(sig)
            total_samples_generated = 0
            # Save each generated example as its own .npy recording; the index
            # i keeps filenames unique within the same wall-clock second.
            for i, sig_chan_sample in enumerate(sig_chan):
                now = datetime.now()
                formatted_time = now.strftime("%Y%m%d_%H%M%S")
                file_name = f"{modulation}_{channel_type}_{filter_type}_{formatted_time}_{i}"
                recording = Recording(sig_chan_sample, metadata=metadata)
                recording.to_npy(filename=file_name, path=recordings_dir_name)
                total_samples_generated += 1
            if verbose:
                click.echo(f"Generated {total_samples_generated} recordings for {modulation} modulation.")
def _choose_filter(filter_type, span_in_symbols, samples_per_symbol, beta):
if filter_type == "RRC":
return RootRaisedCosineFilter(span_in_symbols, samples_per_symbol, beta)
elif filter_type == "RC":
return RaisedCosineFilter(span_in_symbols, samples_per_symbol, beta)
elif filter_type == "SINC":
return SincFilter(span_in_symbols, samples_per_symbol)
else:
raise ValueError("filter_type must be RRC or RC or Sinc")
# Allow running this module directly as a script to generate recordings.
if __name__ == "__main__":
    generate_signal()

View File

@ -0,0 +1,30 @@
"""
RIA Block-Based Signal Generator Module
This module provides a flexible framework for simulating communication systems using configurable blocks. It includes:
- Various block types: filters, mappers, modulators, demodulators, and channels
- Easy-to-use classes for creating custom signal processing chains
- Pre-configured generators for common use cases
Key features:
- Modular design for building complex systems
- Customizable block parameters
- Ready-to-use generators for quick prototyping
Usage:
1. Import desired blocks
2. Configure block parameters
3. Connect blocks to create a processing chain
4. Run simulations with custom or provided input signals
For detailed examples and API reference, see the documentation.
"""
from .awgn_channel import AWGNChannel
from .flat_rayleigh import FlatRayleigh
from .siso_channel import SISOChannel
__all__ = [AWGNChannel, FlatRayleigh, SISOChannel]

View File

@ -0,0 +1,61 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.siso_channel.siso_channel import SISOChannel
class AWGNChannel(SISOChannel):
    """Additive White Gaussian Noise (AWGN) channel.

    Adds circularly-symmetric complex Gaussian noise of total variance
    ``var`` (``var / 2`` per real/imaginary component) to the input signal.

    :param var: The noise variance, defaults to 0 (no noise added).
    :type var: float, optional
    """

    def __init__(self, var: Optional[float] = 0):
        self._var = var
        self.rng = np.random.default_rng()

    @property
    def var(self) -> float:
        """The noise variance."""
        return self._var

    @var.setter
    def var(self, var: float) -> None:
        self._var = var

    def __call__(self, samples: list[np.ndarray]) -> np.ndarray:
        """Add AWGN to the input signal.

        :param samples: The input signal to be processed, as a list whose
            first element is a numpy array of complex samples.
        :type samples: list[numpy array]
        :returns: The input signal with complex Gaussian noise added.
        :rtype: numpy array

        Example:
        --------
        # Create an AWGN channel with variance 0.1
        awgn_channel = AWGNChannel(0.1)

        # Pass a signal through the channel
        noisy_signal = awgn_channel([np.array([1+1j, 2+2j, 3+3j])])
        print(noisy_signal)
        """
        signal = samples[0]
        # var/2 per component so the complex noise power totals var.
        scale = np.sqrt(self._var / 2)
        i_noise = self.rng.standard_normal(signal.shape)
        q_noise = self.rng.standard_normal(signal.shape)
        return signal + scale * (i_noise + 1j * q_noise)

View File

@ -0,0 +1,41 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.siso_channel.siso_channel import SISOChannel
class FlatRayleigh(SISOChannel):
    """
    Flat Rayleigh Fading Channel Block

    Applies a single complex Gaussian gain per signal row: one tap, held
    constant over the whole signal duration (flat fading).

    :param path_gain_db: The path gain in decibels, defaults to 0.
    :type path_gain_db: float, optional

    Methods:
    --------
    __call__(signal: np.ndarray) -> np.ndarray:
        Applies the flat Rayleigh fading effect to the input signal.
    """

    def __init__(self, path_gain_db: Optional[float] = 0):
        self.path_gain_db = path_gain_db
        self.rng = np.random.default_rng()

    def __call__(self, samples: list[np.array]) -> np.ndarray:
        """
        Applies the flat Rayleigh fading effect to the input signal.

        :param samples: The input signal to be processed, as a list containing 1 numpy array.
        :type samples: numpy array
        :return: The signal after being affected by the flat Rayleigh fading.
        :rtype: numpy array
        """
        # Promote the input to a 2-D batch whose rows are signals.
        # NOTE(review): if `samples` is a list containing a single 2-D array
        # (as the docstring says), np.array() yields a 3-D array and the
        # unpacking below fails — confirm the expected input layout.
        signal = np.array(samples)
        num_signals, sig_len = signal.shape
        # dB -> linear power; each fade coefficient has expected power path_gain.
        path_gain = 10 ** (self.path_gain_db / 10)
        # One complex gain per row, constant across the row (flat fading).
        h = np.sqrt(path_gain / 2) * (
            self.rng.standard_normal((num_signals, 1)) + 1j * self.rng.standard_normal((num_signals, 1))
        )
        output = h * signal
        # NOTE(review): only row 0 is returned; any additional faded rows are
        # discarded — confirm this is intentional for batched inputs.
        return output[0]

View File

@ -0,0 +1,54 @@
from abc import abstractmethod
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.process_block import ProcessBlock
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class SISOChannel(ProcessBlock, RecordableBlock):
    """
    Abstract base class for Single-Input Single-Output (SISO) communication channels.

    Concrete channels implement ``__call__`` to transform one baseband signal
    into another.

    Methods:
    --------
    __call__(signal: np.ndarray) -> np.ndarray:
        Apply the channel effect to the input signal.
    """

    def __init__(self, input):
        # `input` is forwarded unchanged to the ProcessBlock initializer.
        super().__init__(input=input)

    @property
    def input_type(self) -> list[DataType]:
        """
        Get the input data types for the SISO channel, one per input port.

        :return: The input data types.
        :rtype: list[DataType]
        """
        return [DataType.BASEBAND_SIGNAL]

    @property
    def output_type(self) -> DataType:
        """
        Get the output data type for the SISO channel.

        :return: The output data type.
        :rtype: DataType
        """
        return DataType.BASEBAND_SIGNAL

    @abstractmethod
    def __call__(self, signal: np.ndarray) -> np.ndarray:
        """
        Apply the channel effect to the input signal.

        :param signal: The input signal to be processed by the channel.
        :type signal: numpy array
        :returns: The output signal after applying the channel effect.
        :rtype: numpy array
        """
        raise NotImplementedError

View File

@ -0,0 +1,19 @@
from .awgn_source import AWGNSource
from .binary_source import BinarySource
from .constant_source import ConstantSource
from .lfm_chirp_source import LFMChirpSource
from .recording_source import RecordingSource
from .sawtooth_source import SawtoothSource
from .sine_source import SineSource
from .square_source import SquareSource
# Public names re-exported by the source-block package.
__all__ = [
    "AWGNSource",
    "ConstantSource",
    "LFMChirpSource",
    "BinarySource",
    "RecordingSource",
    "SawtoothSource",
    "SineSource",
    "SquareSource",
]

View File

@ -0,0 +1,47 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class AWGNSource(SourceBlock, RecordableBlock):
    """
    AWGN Block

    Produces Additive White Gaussian Noise (AWGN) samples.

    Output Type: BASEBAND_SIGNAL

    :param variance: The variance of each of the real and imaginary noise
        components, defaults to 1.
    :type variance: float
    """

    def __init__(self, variance: Optional[float] = 1):
        # Source blocks have no upstream inputs.
        self.input = []
        self.variance = variance

    @property
    def input_type(self):
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples: int):
        """
        Create an array of complex noise samples.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        """
        # NOTE(review): `variance` applies per component here (total complex
        # power 2*variance), whereas AWGNChannel draws var/2 per component
        # (total power var) — confirm which convention consumers expect.
        scale = np.sqrt(self.variance)
        real = np.random.normal(loc=0, scale=scale, size=num_samples)
        imag = np.random.normal(loc=0, scale=scale, size=num_samples)
        return real + 1j * imag

View File

@ -0,0 +1,90 @@
from pathlib import Path
from typing import Literal, Optional, Union
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class BinarySource(SourceBlock):
    """
    Generates bit sequences either randomly or from a file's raw bytes.

    - Random mode (default): uses `p` as the probability of generating a 0.
    - File mode: if `file_path` is passed to __call__, the file is read as BYTES
      and converted to bits using numpy.unpackbits (no assumption of '0'/'1' chars).

    Args:
        p: Probability of outputting 0 in random mode (0..1).
        rng: Optional numpy Generator to control randomness.
    """

    def __init__(self, p: float = 0.5, rng: Optional[np.random.Generator] = None):
        self.p = float(p)
        self.rng = rng if rng is not None else np.random.default_rng()

    @property
    def input_type(self) -> DataType:
        return [DataType.NONE]

    @property
    def output_type(self) -> DataType:
        return DataType.BITS

    def __call__(
        self,
        num_samples: int = 1,
        num_bits: Optional[int] = None,
        file_path: Optional[Union[str, Path]] = None,
        *,
        cycle: bool = True,
        bitorder: Literal["big", "little"] = "big",
    ) -> np.ndarray:
        """
        Generate binary sequences.

        Args:
            num_samples: number of sequences (rows).
            num_bits: bits per sequence (columns). When None (or 0) in random
                mode, a 1-D array of ``num_samples`` bits is returned; in file
                mode it is required.
            file_path: optional path to a file; if provided, read BYTES and convert to bits.
            cycle: if True and requested bits exceed available, repeat from start.
            bitorder: 'big' (MSB-first) or 'little' (LSB-first) for byte-to-bits conversion.

        Returns:
            Array shape (num_samples, num_bits), dtype float32 with values {0.0, 1.0}.

        Raises:
            FileNotFoundError: If ``file_path`` does not exist.
            ValueError: If the file is empty, ``num_bits`` is missing in file
                mode, or the file is too short and ``cycle`` is False.
        """
        if file_path is None:
            # Random mode: 0 with prob p, 1 with prob (1-p)
            if num_bits:
                return (self.rng.random((num_samples, num_bits)) > self.p).astype(np.float32)
            else:
                return (self.rng.random((num_samples)) > self.p).astype(np.float32)
        # File mode requires an explicit sequence length; previously a missing
        # num_bits crashed below with an opaque TypeError.
        if num_bits is None:
            raise ValueError("num_bits is required when reading bits from a file.")
        # File mode: read raw bytes and unpack to bits
        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"File not found: {path}")
        data = path.read_bytes()
        if not data:
            raise ValueError(f"File is empty: {path}")
        # Convert bytes -> bits (uint8 -> 8 bits each)
        byte_arr = np.frombuffer(data, dtype=np.uint8)
        bits_u8 = np.unpackbits(byte_arr, bitorder=bitorder)
        file_bits = bits_u8.astype(np.float32)  # {0., 1.}
        total_bits = num_samples * num_bits
        if total_bits > file_bits.size:
            if not cycle:
                raise ValueError(
                    f"Requested {total_bits} bits, but file provides {file_bits.size}. "
                    f"Set cycle=True (default) to repeat."
                )
            # Repeat the file's bit stream until the request is covered.
            reps = int(np.ceil(total_bits / file_bits.size))
            out = np.tile(file_bits, reps)[:total_bits]
        else:
            out = file_bits[:total_bits]
        return out.reshape(num_samples, num_bits)

View File

@ -0,0 +1,43 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class ConstantSource(SourceBlock, RecordableBlock):
    """
    Constant Source Block

    Produces constant real samples and 0 imaginary samples.

    :param amplitude: The value of the real samples, defaults to 1.
    :type amplitude: float.
    """

    def __init__(self, amplitude: Optional[float] = 1):
        # Source blocks have no upstream inputs; set for consistency with the
        # sibling sources (AWGNSource, SawtoothSource, ...), which all do.
        self.input = []
        self.amplitude = amplitude

    @property
    def input_type(self):
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Create an array of constant value samples with 0 imaginary component.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        """
        return np.ones(num_samples, dtype=np.complex64) * self.amplitude

View File

@ -0,0 +1,107 @@
from typing import Optional
import numpy as np
from scipy.signal import chirp
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class LFMChirpSource(SourceBlock, RecordableBlock):
    """
    LFM Chirp Source Block

    Produces Linear Frequency Modulation (LFM) Chirp signals.

    :param sample_rate: The sample rate, defaults to 1 MHz.
    :type sample_rate: float
    :param bandwidth: The bandwidth of the chirp signal, must be < sample_rate/2.
    :type bandwidth: float.
    :param chirp_period: The chirp period in seconds, defaults to 0.01.
    :type chirp_period: float.
    :param chirp_type: The direction (on a spectrogram) of the LFM chirps.
        Options: 'up','down', or 'up_down', defaults to 'up'.
    :type chirp_type: str."""

    def __init__(
        self,
        sample_rate: Optional[float] = 1e6,
        bandwidth: Optional[float] = 5e5,
        chirp_period: Optional[float] = 0.01,
        chirp_type: Optional[str] = "up",
    ):
        self.sample_rate = sample_rate
        self.bandwidth = bandwidth
        self.chirp_period = chirp_period
        self.chirp_type = chirp_type

    @property
    def input_type(self):
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Create an array of samples of an LFM signal with previously initialized parameters.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        :raises ValueError: If ``chirp_type`` is not 'up', 'down', or
            'up_down', or if the wrong number of samples is produced.
        """
        chirp_length = int(self.chirp_period * self.sample_rate)
        t_chirp = np.linspace(0, self.chirp_period, chirp_length)
        if len(t_chirp) > chirp_length:
            t_chirp = t_chirp[:chirp_length]
        # Generate one chirp sweeping the configured bandwidth
        if self.chirp_type == "up":
            # NOTE(review): the up-chirp starts at 1 kHz while the down-chirp
            # ends at 0 Hz — confirm this asymmetry is intentional.
            baseband_chirp = chirp(
                t_chirp,
                f0=1000,
                f1=self.bandwidth,
                t1=self.chirp_period,
                method="linear",
                complex=True,
            )
        elif self.chirp_type == "down":
            baseband_chirp = chirp(
                t_chirp,
                f0=self.bandwidth,
                f1=0,
                t1=self.chirp_period,
                method="linear",
                complex=True,
            )
        elif self.chirp_type == "up_down":
            half_duration = self.chirp_period / 2
            t_up_half, _ = np.array_split(t_chirp, 2)
            up_part = chirp(
                t_up_half,
                f0=0,
                t1=half_duration,
                f1=self.bandwidth,
                method="linear",
                complex=True,
            )
            # Mirror the up sweep to form the descending half.
            down_part = np.flip(up_part)
            baseband_chirp = np.concatenate([up_part, down_part])
        else:
            # Previously an unknown chirp_type fell through and raised a
            # confusing NameError further down.
            raise ValueError("chirp_type must be 'up', 'down', or 'up_down'")
        # Tile whole chirps to cover the request, then trim to length.
        num_chirps = int(np.ceil(num_samples / chirp_length))
        full_signal = np.tile(baseband_chirp, num_chirps)
        trimmed_signal = full_signal[:num_samples]
        # Create an analytic signal (complex with no negative frequency components)
        # Shift the chirp down by BW/2 so it is centered at 0 Hz.
        total_time = num_samples / self.sample_rate
        t_full = np.linspace(0, total_time, len(trimmed_signal))
        complex_chirp = trimmed_signal * np.exp(1j * 2 * np.pi * (0 - self.bandwidth / 2) * t_full)
        if len(complex_chirp) != num_samples:
            # Message previously named the wrong class ("LFMJammer").
            raise ValueError("LFMChirpSource did not produce the correct number of samples.")
        return complex_chirp

View File

@ -0,0 +1,47 @@
from ria_toolkit_oss.datatypes import Recording
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class RecordingSource(SourceBlock, RecordableBlock):
    """
    Recording Source Block

    Passes samples from the provided recording to downstream blocks.

    :param recording: The :ref:`Recording <ria_toolkit_oss.data.Recording>` that provides samples.
    :type recording: :ref:`Recording <ria_toolkit_oss.data.Recording>`

    Warning: Only uses channel 0 of multi-channel recordings."""

    def __init__(self, recording: Recording):
        self.recording = recording

    @property
    def input_type(self):
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Return the first num_samples samples of the recording, channel 0.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        :raises ValueError: If num_samples is greater than the recording length.
        """
        available = self.recording.data.shape[1]
        if num_samples > available:
            # Implicit concatenation replaces the previous backslash-continued
            # f-string, which embedded source indentation in the message.
            raise ValueError(
                f"{num_samples} samples requested from recording source with "
                f"{available} samples available."
            )
        return self.recording.data[0, 0:num_samples]

View File

@ -0,0 +1,66 @@
from typing import Optional
import numpy as np
import scipy
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class SawtoothSource(SourceBlock, RecordableBlock):
    """
    Sawtooth Source Block

    Creates a sawtooth signal real part and 0 imaginary part.

    :param frequency: The frequency of the saw wave, defaults to 100 kHz.
    :type frequency: float.
    :param sample_rate: The sample rate, defaults to 1 MHz.
    :type sample_rate: float
    :param amplitude: The maximum amplitude of the signal, defaults to 1.
    :type amplitude: float.
    :param phase_shift: The phase shift of the saw wave in radians
        relative to the wave period. NOT a complex phase shift.
    :type phase_shift: float.
    """

    def __init__(
        self,
        frequency: Optional[float] = 100e3,
        sample_rate: Optional[float] = 1e6,
        amplitude: Optional[float] = 1,
        phase_shift: Optional[float] = 0,
    ):
        # Source blocks have no upstream inputs.
        self.input = []
        self.frequency = frequency
        self.amplitude = amplitude
        self.sample_rate = sample_rate
        self.phase_shift = phase_shift

    @property
    def input_type(self) -> list[DataType]:
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Create a sawtooth signal.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        """
        t = np.arange(num_samples)
        # phase_shift is given in radians of one wave period, so convert it
        # to a time offset before evaluating the waveform.
        saw_wave = self.amplitude * scipy.signal.sawtooth(
            2 * np.pi * self.frequency * (t / self.sample_rate - (self.phase_shift / (2 * np.pi)))
        )
        # Cast to complex baseband: the imaginary component is zero.
        return np.array(saw_wave, dtype=np.complex64)

View File

@ -0,0 +1,64 @@
from typing import Optional
import numpy as np
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class SineSource(SourceBlock, RecordableBlock):
    """
    Sine Source Block

    Creates a sine signal with a sinusoidal real part and 0 imaginary part.

    :param frequency: The frequency of the sine wave, defaults to 100 kHz.
    :type frequency: float.
    :param sample_rate: The sample rate, defaults to 1 MHz.
    :type sample_rate: float
    :param amplitude: The maximum amplitude of the signal, defaults to 1.
    :type amplitude: float.
    :param phase_shift: The phase shift of the sine wave in radians
        relative to the wave period. NOT a complex phase shift.
    :type phase_shift: float.
    """

    def __init__(
        self,
        frequency: Optional[float] = 100e3,
        sample_rate: Optional[float] = 1e6,
        amplitude: Optional[float] = 1,
        phase_shift: Optional[float] = 0,
    ):
        # Source blocks have no upstream inputs.
        self.input = []
        self.frequency = frequency
        self.amplitude = amplitude
        self.sample_rate = sample_rate
        self.phase_shift = phase_shift

    @property
    def input_type(self) -> list[DataType]:
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Create a sine signal.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        """
        total_time = num_samples / self.sample_rate
        # endpoint=False keeps the sample spacing exactly 1/sample_rate.
        t = np.linspace(0, total_time, num_samples, endpoint=False)
        sine_wave = self.amplitude * np.sin(2 * np.pi * self.frequency * t + self.phase_shift)
        # Cast to complex baseband: the imaginary component is zero.
        return np.array(sine_wave, dtype=np.complex64)

View File

@ -0,0 +1,70 @@
from typing import Optional
import numpy as np
import scipy
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
class SquareSource(RecordableBlock, SourceBlock):
    """
    Square Source Block

    Creates a square wave signal with a square shaped real part and 0 imaginary part.

    :param frequency: The frequency of the square wave, defaults to 100 kHz.
    :type frequency: float.
    :param sample_rate: The sample rate, defaults to 1 MHz.
    :type sample_rate: float
    :param amplitude: The maximum amplitude of the signal, defaults to 1.
    :type amplitude: float.
    :param duty_cycle: The ratio of positive to negative values in single period, defaults to 0.5.
    :type duty_cycle: float
    :param phase_shift: The phase shift of the square wave in radians
        relative to the wave period. NOT a complex phase shift.
    :type phase_shift: float.
    """

    # NOTE(review): the base-class order here (RecordableBlock, SourceBlock)
    # is reversed relative to every sibling source; left unchanged because
    # reordering would alter the MRO — confirm it is intentional.
    def __init__(
        self,
        frequency: Optional[float] = 100e3,
        sample_rate: Optional[float] = 1e6,
        amplitude: Optional[float] = 1,
        duty_cycle: Optional[float] = 0.5,
        phase_shift: Optional[float] = 0,
    ):
        # Source blocks have no upstream inputs.
        self.input = []
        self.frequency = frequency
        self.amplitude = amplitude
        self.sample_rate = sample_rate
        self.phase_shift = phase_shift
        self.duty_cycle = duty_cycle

    @property
    def input_type(self):
        return [DataType.NONE]

    @property
    def output_type(self):
        return DataType.BASEBAND_SIGNAL

    def __call__(self, num_samples):
        """
        Create a square wave signal.

        :param num_samples: The number of samples to return.
        :type num_samples: int
        :returns: Output samples.
        :rtype: np.array
        """
        t = np.arange(num_samples)
        # phase_shift is given in radians of one wave period, so convert it
        # to a time offset before evaluating the waveform.
        square_wave = self.amplitude * scipy.signal.square(
            2 * np.pi * self.frequency * (t / self.sample_rate - (self.phase_shift / (2 * np.pi))),
            duty=self.duty_cycle,
        )
        # Cast to complex baseband: the imaginary component is zero.
        return np.array(square_wave, dtype=np.complex64)

View File

@ -0,0 +1,37 @@
import json
from abc import ABC, abstractmethod
from ria_toolkit_oss.signal.block_generator.block import Block
class SourceBlock(Block, ABC):
    """Abstract base for blocks that generate samples without upstream inputs."""

    @abstractmethod
    def __call__(self, num_samples: int):
        """
        Create num_samples samples.

        :param num_samples: The number of samples to create.
        :type num_samples: int"""

    def get_samples(self, num_samples):
        """
        Return num_samples samples from this source block.

        :param num_samples: The number of samples to return.
        :type num_samples: int"""
        return self(num_samples=num_samples)

    def _get_metadata(self):
        """Collect this block's JSON-serializable attributes, namespaced by class name."""
        prefix = f"BlockGenerator:{self.__class__.__name__}"
        metadata = {}
        for key, value in vars(self).items():
            try:
                json.dumps(value)  # keep only values that survive JSON round-tripping
            except (TypeError, ValueError):
                continue  # non-serializable attribute: skip silently
            metadata[f"{prefix}:{key}"] = value
        return metadata

View File

@ -0,0 +1,5 @@
from .gmsk_modulator import GMSKModulator
from .ook_modulator import OOKModulator
from .oqpsk_modulator import OQPSKModulator
__all__ = ["GMSKModulator", "OOKModulator", "OQPSKModulator"]

View File

@ -0,0 +1,65 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class GMSKModulator(RecordableBlock):
    """Gaussian Minimum Shift Keying Modulator.

    NRZ-maps input bits, shapes the resulting impulse train with a Gaussian
    frequency pulse (bandwidth-time product ``bt``), and integrates frequency
    into phase to produce a unit-magnitude complex baseband signal.

    :param input_block: Upstream block providing bits.
    :param samples_per_symbol: Oversampling factor, defaults to 8.
    :param bt: Gaussian filter bandwidth-time product, defaults to 0.3.
    """

    def __init__(self, input_block: Block, samples_per_symbol: int = 8, bt: float = 0.3):
        self.input = [input_block]
        self.sps = samples_per_symbol
        self.bt = bt
        # Generate the Gaussian frequency pulse. Standard deviation follows
        # the usual Gaussian-filter relation sigma = sqrt(ln 2) / (2*pi*BT).
        sigma = np.sqrt(np.log(2)) / (2 * np.pi * self.bt)
        # Time axis normalized by the symbol period; pulse spans +/- 4 symbols.
        t_norm = np.arange(-4 * self.sps, 4 * self.sps + 1) / self.sps
        # Gaussian shape
        g = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-(t_norm**2) / (2 * sigma**2))
        # Normalize the pulse area to 0.5 so each symbol advances the phase by
        # pi/2 after integration (modulation index h = 0.5, as in MSK).
        g = g / np.sum(g) * 0.5
        self.pulse = g

    @property
    def input_type(self) -> DataType:
        return [DataType.BITS]

    @property
    def output_type(self) -> DataType:
        return DataType.BASEBAND_SIGNAL

    def get_samples(self, num_samples: int):
        """Produce ``num_samples`` GMSK baseband samples.

        :param num_samples: Number of output samples to return.
        :returns: Unit-magnitude complex samples.
        """
        # Each symbol yields `sps` output samples; round up the symbol count.
        num_symbols = int(np.ceil(num_samples / self.sps))
        bits = self.input[0].get_samples(num_symbols)
        # NRZ: 0->-1, 1->1
        symbols = 2 * bits - 1
        # Upsample to an impulse train (zeros between symbol impulses).
        upsampled = np.zeros(len(symbols) * self.sps)
        upsampled[:: self.sps] = symbols
        # Convolve with the Gaussian pulse -> instantaneous frequency.
        freq_signal = np.convolve(upsampled, self.pulse, mode="same")
        # Integrate frequency into phase. With pulse area 0.5, cumsum * pi
        # yields the pi/2 phase change per symbol required for MSK (h = 0.5).
        phase = np.cumsum(freq_signal) * np.pi
        iq = np.exp(1j * phase)
        return iq[:num_samples]

    def __call__(self, num_samples):
        return self.get_samples(num_samples=num_samples)

View File

@ -0,0 +1,40 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class OOKModulator(RecordableBlock):
    """On-Off Keying Modulator: bit 1 maps to amplitude 1, bit 0 to amplitude 0."""

    def __init__(self, input_block: Block, samples_per_symbol: int = 8):
        self.input = [input_block]
        self.sps = samples_per_symbol

    @property
    def input_type(self) -> DataType:
        return [DataType.BITS]

    @property
    def output_type(self) -> DataType:
        return DataType.BASEBAND_SIGNAL

    def get_samples(self, num_samples: int):
        """Produce ``num_samples`` OOK samples with rectangular pulse shaping."""
        # One bit spans `sps` samples; round the bit count up so we always
        # generate at least num_samples before trimming.
        symbols_needed = int(np.ceil(num_samples / self.sps))
        bits = self.input[0].get_samples(symbols_needed)
        # Rectangular pulse: hold each bit (0.0 or 1.0) for `sps` samples,
        # then cast to complex baseband (Q component identically zero).
        shaped = np.repeat(bits, self.sps).astype(np.complex64)
        return shaped[:num_samples]

    def __call__(self, num_samples):
        return self.get_samples(num_samples=num_samples)

View File

@ -0,0 +1,70 @@
import numpy as np
from ria_toolkit_oss.signal.block_generator.block import Block
from ria_toolkit_oss.signal.block_generator.data_types import DataType
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
class OQPSKModulator(RecordableBlock):
    """Offset QPSK Modulator.

    Maps bit pairs to NRZ I/Q symbols with rectangular pulses, delaying the
    Q channel by half a symbol so I and Q transitions are staggered.

    :param input_block: Upstream block providing bits.
    :param samples_per_symbol: Oversampling factor, defaults to 8.
    """

    def __init__(self, input_block: Block, samples_per_symbol: int = 8):
        self.input = [input_block]
        self.sps = samples_per_symbol
        # QPSK: 2 bits per symbol
        self.bps = 2

    @property
    def input_type(self) -> DataType:
        return [DataType.BITS]

    @property
    def output_type(self) -> DataType:
        return DataType.BASEBAND_SIGNAL

    def get_samples(self, num_samples: int):
        """Produce ``num_samples`` OQPSK baseband samples.

        :param num_samples: Number of output samples to return.
        :returns: Complex baseband samples.
        """
        # One symbol spans `sps` samples and carries 2 bits; round up.
        num_symbols = int(np.ceil(num_samples / self.sps))
        num_bits = num_symbols * 2
        bits = self.input[0].get_samples(num_bits)
        # Even-indexed bits drive I, odd-indexed bits drive Q.
        i_bits = bits[0::2]
        q_bits = bits[1::2]
        # NRZ map: 0 -> -1, 1 -> +1
        i_syms = 2 * i_bits - 1
        q_syms = 2 * q_bits - 1
        # Rectangular pulse shaping: hold each symbol for `sps` samples.
        # NOTE(review): no RRC/half-sine shaping is applied here — confirm a
        # rectangular pulse is the intended OQPSK variant.
        i_samples = np.repeat(i_syms, self.sps)
        q_samples = np.repeat(q_syms, self.sps)
        # Stagger the Q channel by half a symbol period.
        offset = self.sps // 2
        q_samples_delayed = np.roll(q_samples, offset)
        # np.roll wraps the tail to the front; zero those samples so the
        # delayed channel starts silent instead of leaking future symbols.
        q_samples_delayed[:offset] = 0  # Initialize
        # Combine the staggered rails into a complex signal.
        iq = i_samples + 1j * q_samples_delayed
        return iq[:num_samples]

    def __call__(self, num_samples):
        return self.get_samples(num_samples=num_samples)

View File

@ -0,0 +1,17 @@
from abc import ABC, abstractmethod
from ria_toolkit_oss.datatypes import Recording
class Recordable(ABC):
    """Base class for all recordables, including SDRs and synthetic signal generators, that produce ``Recording``
    objects.
    """

    @abstractmethod
    def record(self, *args, **kwargs) -> Recording:
        """Generate a Recording object.

        Implementations define their own positional/keyword arguments.

        :returns: The captured or generated recording.
        :rtype: Recording
        """
        pass

View File

@ -0,0 +1,11 @@
"""
The package contains assorted plotting and report generation utilities to help visualize RIA components such as
recordings and radio datasets.
"""
# Public plotting helpers re-exported at package level.
__all__ = [
    "view_channels",
    "view_sig",
]
from .view_signal import view_channels, view_sig

View File

@ -0,0 +1,63 @@
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from ria_toolkit_oss.io.recording import from_npy
def create_dataset_pdf(dataset_path, output_path, div=64, metadata_keys=None):
    """Render every ``.npy`` recording under ``dataset_path`` into a PDF report.

    Each page shows the spectrogram of channel 0 plus selected metadata text.

    :param dataset_path: Directory tree to scan for ``.npy`` recordings.
    :param output_path: Path of the PDF file to write.
    :param div: Signal-length divisor controlling the FFT size, defaults to 64.
    :param metadata_keys: Optional list of metadata keys to print; all
        metadata is printed when None.
    """
    # Collect paths first so the progress report uses the true total (the
    # previous counter compared a running total against per-directory counts).
    npy_paths = [
        os.path.join(root, name)
        for root, _, files in os.walk(dataset_path)
        for name in files
        if name.endswith(".npy")
    ]
    with PdfPages(output_path) as pdf:
        for i, full_path in enumerate(npy_paths, start=1):
            print(f"{i}/{len(npy_paths)}")
            recording = from_npy(full_path)
            samples = recording.data[0]
            metadata = recording.metadata
            if metadata_keys is not None:
                metadata_to_print = {key: metadata.get(key, "None") for key in metadata_keys}
            else:
                metadata_to_print = metadata
            signal_length = len(samples)
            # FFT size: largest power of two <= signal_length/div, floored at 64.
            nfft = max(2 ** int(np.log2(signal_length // div)), 64)
            dict_text = "\n".join(f"{key}: {value}" for key, value in metadata_to_print.items())
            fig, axs = plt.subplots(2, 1, figsize=(10, 10), gridspec_kw={"height_ratios": [4, 1]})
            # Create the spectrogram in the first subplot. Clamp the overlap
            # below nfft: a fixed 128 is invalid for the minimum FFT size (64).
            axs[0].specgram(
                samples,
                NFFT=nfft,
                Fs=metadata["sample_rate"],
                cmap="twilight",
                noverlap=min(128, nfft // 2),
            )
            axs[0].set_title(os.path.basename(full_path))
            axs[0].set_xlabel("Time (s)")
            axs[0].set_ylabel("Frequency (Hz)")
            # Adjust layout so that there's enough space for the text subplot.
            plt.subplots_adjust(hspace=0.5)
            # Add the metadata text in the second subplot.
            axs[1].text(0.1, 0.5, dict_text, ha="left", va="center", fontsize=10, color="black", wrap=True)
            axs[1].axis("off")  # Turn off axes for the text subplot
            # Save the figure (spectrogram and text) to the PDF.
            pdf.savefig(fig)
            plt.close(fig)  # close this figure explicitly to free memory
# Ad-hoc local entry point. NOTE(review): the dataset path below is a
# hard-coded machine-specific location.
if __name__ == "__main__":
    create_dataset_pdf("/mnt/hddstorage/alec/qesa1_c4/nov15/low_mod2", "dataset.pdf")

View File

@ -0,0 +1,192 @@
import numpy as np
import plotly.graph_objects as go
import scipy.signal as signal
from plotly.graph_objs import Figure
from scipy.fft import fft, fftshift
from ria_toolkit_oss.datatypes import Recording
def spectrogram(rec: Recording, thumbnail: bool = False) -> Figure:
    """Create a spectrogram for the recording.

    :param rec: Signal to plot.
    :type rec: utils.data.Recording
    :param thumbnail: Whether to return a small thumbnail version or full plot.
    :type thumbnail: bool
    :return: Spectrogram, as a Plotly figure.
    """
    complex_signal = rec.data[0]
    sample_rate = int(rec.metadata.get("sample_rate", 1))
    plot_length = len(complex_signal)
    # Determine FFT size: grow with signal length so short captures retain
    # frequency resolution while long ones stay fast to render.
    if plot_length < 2000:
        fft_size = 64
    elif plot_length < 10000:
        fft_size = 256
    elif plot_length < 1000000:
        fft_size = 1024
    else:
        fft_size = 2048
    frequencies, times, Sxx = signal.spectrogram(
        complex_signal,
        fs=sample_rate,
        nfft=fft_size,
        nperseg=fft_size,
        noverlap=fft_size // 8,
        scaling="density",
        mode="complex",
        return_onesided=False,
    )
    # Convert complex values to amplitude and then to log scale for visualization
    Sxx_magnitude = np.abs(Sxx)
    Sxx_log = np.log10(Sxx_magnitude + 1e-6)
    # Normalize spectrogram values between 0 and 1 for plotting; guard the
    # flat case (e.g. an all-zero signal) that would divide by zero.
    Sxx_log_shifted = Sxx_log - np.min(Sxx_log)
    peak = np.max(Sxx_log_shifted)
    Sxx_log_norm = Sxx_log_shifted / peak if peak > 0 else Sxx_log_shifted
    # Shift frequency bins and spectrogram rows so frequencies run from negative to positive
    frequencies_shifted = np.fft.fftshift(frequencies)
    Sxx_shifted = np.fft.fftshift(Sxx_log_norm, axes=0)
    fig = go.Figure(
        data=go.Heatmap(
            z=Sxx_shifted,
            # scipy returns `times` in seconds already; the previous `/ 1e6`
            # disagreed with the "Time [s]" axis label.
            x=times,
            y=frequencies_shifted,
            colorscale="Viridis",
            zmin=0,
            zmax=1,
            reversescale=False,
            showscale=False,
        )
    )
    if thumbnail:
        fig.update_xaxes(showticklabels=False)
        fig.update_yaxes(showticklabels=False)
        fig.update_layout(
            template="plotly_dark",
            width=200,
            height=100,
            margin=dict(l=5, r=5, t=5, b=5),
            xaxis=dict(scaleanchor=None),
            yaxis=dict(scaleanchor=None),
        )
    else:
        fig.update_layout(
            title="Spectrogram",
            xaxis_title="Time [s]",
            yaxis_title="Frequency [Hz]",
            template="plotly_dark",
            height=300,
            width=800,
        )
    return fig
def iq_time_series(rec: Recording) -> Figure:
    """Create a time series plot of the real and imaginary parts of signal.

    :param rec: Signal to plot.
    :type rec: utils.data.Recording
    :return: Time series plot as a Plotly figure.
    """
    iq = rec.data[0]
    fs = int(rec.metadata.get("sample_rate", 1))
    # Sample instants in seconds.
    t = np.arange(len(iq)) / fs
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=t, y=iq.real, mode="lines", name="I (In-phase)", line=dict(width=0.6)))
    fig.add_trace(go.Scatter(x=t, y=iq.imag, mode="lines", name="Q (Quadrature)", line=dict(width=0.6)))
    fig.update_layout(
        title="IQ Time Series",
        xaxis_title="Time [s]",
        yaxis_title="Amplitude",
        template="plotly_dark",
        height=300,
        width=800,
        showlegend=True,
    )
    return fig
def frequency_spectrum(rec: Recording) -> Figure:
    """Create a frequency spectrum plot from the recording.

    The magnitude spectrum is FFT-shifted so frequencies run from negative to
    positive around the center frequency, log-scaled, and min-max normalized
    to [0, 1] before plotting.

    :param rec: Input signal to plot.
    :type rec: utils.data.Recording
    :return: Frequency spectrum as a Plotly figure.
    """
    complex_signal = rec.data[0]
    center_frequency = int(rec.metadata.get("center_frequency", 0))
    sample_rate = int(rec.metadata.get("sample_rate", 1))
    epsilon = 1e-10  # Avoid log10(0) on zero-magnitude bins.
    spectrum = np.abs(fftshift(fft(complex_signal)))
    freqs = np.linspace(-sample_rate / 2, sample_rate / 2, len(complex_signal)) + center_frequency
    log_spectrum = np.log10(spectrum + epsilon)
    # Min-max normalize. A constant spectrum (e.g. all-zero input) would make
    # the range zero; guard against dividing by zero so we plot a flat line
    # instead of NaNs.
    spectrum_range = log_spectrum.max() - log_spectrum.min()
    if spectrum_range > 0:
        scaled_log_spectrum = (log_spectrum - log_spectrum.min()) / spectrum_range
    else:
        scaled_log_spectrum = np.zeros_like(log_spectrum)
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=freqs, y=scaled_log_spectrum, mode="lines", name="Spectrum", line=dict(width=0.4)))
    fig.update_layout(
        title="Frequency Spectrum",
        xaxis_title="Frequency [Hz]",
        yaxis_title="Magnitude",
        yaxis_type="log",
        template="plotly_dark",
        height=300,
        width=800,
        showlegend=False,
    )
    return fig
def constellation(rec: Recording) -> Figure:
    """Create a constellation plot from the recording.

    :param rec: Input signal to plot.
    :type rec: utils.data.Recording
    :return: Constellation as a Plotly figure.
    """
    samples = rec.data[0]
    # Downsample the IQ samples to a target number of points. This reduces the
    # amount of data plotted, improving performance and interactivity without
    # losing significant detail in the constellation visualization.
    target_number_of_points = 5000
    stride = max(1, len(samples) // target_number_of_points)
    in_phase = samples.real[::stride]
    quadrature = samples.imag[::stride]
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=in_phase, y=quadrature, mode="lines", name="Constellation", line=dict(width=0.2)))
    fig.update_layout(
        title="Constellation",
        xaxis_title="In-phase (I)",
        yaxis_title="Quadrature (Q)",
        template="plotly_dark",
        height=400,
        width=400,
        showlegend=False,
        xaxis=dict(range=[-1.1, 1.1]),
        yaxis=dict(range=[-1.1, 1.1]),
    )
    return fig

View File

@ -39,6 +39,67 @@ def set_spines(ax, spines):
ax.spines["left"].set_visible(False) ax.spines["left"].set_visible(False)
def view_channels(
    recording: Recording,
    output_path: Optional[str] = "images/signal.png",
    title: Optional[str] = "Multichannel Signal Plot",
) -> None:
    """Create a PNG of the recording samples, spectrogram, and constellation plot.

    Plot is automatically saved to file at output_path.

    :param recording: The recording object to plot
    :type recording: Recording
    :param output_path: The path to save the image. Defaults to "images/signal.png".
    :type output_path: str, optional
    :param title: The plot title. Defaults to "Multichannel Signal Plot".
    :type title: str, optional
    :return: None

    **Examples:**

    .. todo:: Usage examples coming soon.
    """
    num_channels = recording.data.shape[0]
    # squeeze=False guarantees a 2-D axes array even for a single channel;
    # without it plt.subplots(nrows=1, ncols=2) returns a 1-D array and the
    # axes[i, j] indexing below raises IndexError.
    fig, axes = plt.subplots(nrows=num_channels, ncols=2, squeeze=False)
    fig.subplots_adjust(wspace=0.5, hspace=0.5)
    plt.style.use("dark_background")
    fig.suptitle(title, fontsize=16)
    axes[0, 0].set_title("IQ Signal", color=COLORS["light"])
    axes[0, 1].set_title("Spectrogram", color=COLORS["light"])
    linewidth = 0.5
    tick_fontsize = 4
    center_frequency = recording.metadata.get("center_frequency", 0)
    sample_rate = recording.metadata.get("sample_rate", 1)
    sample_indexes = np.arange(0, len(recording.data[0]), 1)
    t = sample_indexes / sample_rate
    for i in range(num_channels):
        # Left column: I/Q time series; right column: per-channel spectrogram.
        axes[i, 0].plot(t, np.real(recording.data[i]), linewidth=linewidth)
        axes[i, 0].plot(t, np.imag(recording.data[i]), linewidth=linewidth)
        axes[i, 1].specgram(recording.data[i], Fs=sample_rate, Fc=center_frequency)
        axes[i, 0].tick_params(labelsize=tick_fontsize, colors=COLORS["light"])
        axes[i, 1].tick_params(labelsize=tick_fontsize, colors=COLORS["light"])
        axes[i, 0].set_ylabel("Amplitude", fontsize=6, color=COLORS["light"])
        axes[i, 1].set_ylabel("Freq (Hz)", fontsize=6, color=COLORS["light"])
        # Only the bottom row keeps x tick labels and axis titles.
        if i != num_channels - 1:
            axes[i, 0].set_xticks([])
            axes[i, 1].set_xticks([])
        else:
            axes[i, 0].set_xlabel("Time (s)", fontsize=6, color=COLORS["light"])
            axes[i, 1].set_xlabel("Time (s)", fontsize=6, color=COLORS["light"])
    output_path, _ = set_path(output_path=output_path)
    plt.savefig(output_path, dpi=1000)
    print(f"Saved signal plot to {output_path}")
def view_sig( def view_sig(
recording: Recording, recording: Recording,
output_path: Optional[str] = "images/signal.png", output_path: Optional[str] = "images/signal.png",

View File

View File

@ -0,0 +1,20 @@
"""
This module contains the main group for the ria toolkit oss CLI.
"""
import click
from ria_toolkit_oss_cli.ria_toolkit_oss import commands
@click.group()
@click.option("-v", "--verbose", is_flag=True, type=bool, help="Increase verbosity, especially useful for debugging.")
def cli(verbose):
pass
# Loop through project commands, binding them all to the CLI.
for command_name in dir(commands):
command = getattr(commands, command_name)
if isinstance(command, click.Command):
cli.add_command(command, name=command_name)

View File

@ -0,0 +1,414 @@
"""Capture command for SDR devices."""
import os
import click
from ria_toolkit_oss.io import to_blue, to_npy, to_sigmf, to_wav
from ria_toolkit_oss.io.recording import generate_filename
from ria_toolkit_oss.view.view_signal_simple import view_simple_sig
from .common import (
echo_progress,
echo_verbose,
format_frequency,
format_sample_rate,
get_sdr_device,
load_yaml_config,
parse_frequency,
parse_metadata_args,
)
from .config import load_user_config
from .discover import (
find_bladerf_devices,
find_hackrf_devices,
find_pluto_devices,
find_rtlsdr_devices,
find_thinkrf_devices,
find_uhd_devices,
load_sdr_drivers,
)
def list_all_devices():
    """Load SDR drivers, then return every device found across all supported backends."""
    load_sdr_drivers(verbose=False)
    # One finder per supported SDR family; each returns a list of device dicts.
    finders = (
        find_uhd_devices,
        find_pluto_devices,
        find_hackrf_devices,
        find_bladerf_devices,
        find_rtlsdr_devices,
        find_thinkrf_devices,
    )
    devices = []
    for finder in finders:
        devices.extend(finder())
    return devices
def auto_select_device(quiet: bool = False) -> str:
    """Auto-select device if only one is connected.

    Args:
        quiet: Suppress warning messages

    Returns:
        Device type string

    Raises:
        click.ClickException: If no devices or multiple devices found
    """
    devices = list_all_devices()
    if not devices:
        raise click.ClickException("No SDR devices found.\n" "Run 'ria discover' to see available devices.")
    if len(devices) > 1:
        device_list = "\n".join(f" - {d.get('type', 'Unknown')}" for d in devices)
        raise click.ClickException(
            f"Multiple devices found. Specify with --device\n\n"
            f"Available devices:\n{device_list}\n\n"
            f"Run 'ria discover' for more details."
        )
    device = devices[0]
    normalized = device.get("type", "Unknown").lower().replace("-", "").replace(" ", "")
    # Map device type names to internal names
    type_map = {
        "plutosdr": "pluto",
        "hackrf": "hackrf",
        "hackrfone": "hackrf",
        "bladerf": "bladerf",
        "usrp": "usrp",
        "b200": "usrp",
        "b210": "usrp",
        "rtlsdr": "rtlsdr",
        "thinkrf": "thinkrf",
    }
    device_type = type_map.get(normalized, normalized)
    if not quiet:
        click.echo(
            click.style("Warning: ", fg="yellow")
            + f"No device specified. Auto-detected {device.get('type', 'Unknown')}",
            err=True,
        )
        click.echo(f"Use --device {device_type} to suppress this warning.\n", err=True)
    return device_type
def get_metadata_dict(config, metadata):
    """Merge metadata from the config file, user config defaults, and CLI args.

    Precedence (lowest to highest): user config < config-file ``metadata``
    section < CLI ``--metadata`` flags.

    :param config: Parsed YAML config dictionary (may contain a "metadata" key).
    :param metadata: Iterable of KEY=VALUE strings from the command line.
    :return: Merged metadata dictionary.
    """
    # Copy so the later update() calls never mutate the dict stored inside the
    # caller's config.
    metadata_dict = dict(config.get("metadata", {}))
    # Load user config and apply defaults
    user_config = load_user_config()
    # Apply user config metadata (if user config exists)
    if user_config:
        # Add standard metadata fields from config
        for key in ["author", "organization", "project", "location", "testbed"]:
            if key in user_config and key not in metadata_dict:
                metadata_dict[key] = user_config[key]
        # Add SigMF fields from config
        if "sigmf" in user_config:
            sigmf = user_config["sigmf"]
            for key in ["license", "hw", "dataset"]:
                if key in sigmf and key not in metadata_dict:
                    metadata_dict[key] = sigmf[key]
    # CLI metadata overrides everything
    if metadata:
        metadata_dict.update(parse_metadata_args(metadata))
    return metadata_dict
def save_visualization(recording, output_file: str, quiet: bool = False):
    """Save visualization of recording.

    Any plotting failure is reported as a warning rather than raised, so a
    capture is never lost because visualization failed.

    Args:
        recording: Recording object
        output_file: Path to save visualization (PNG)
        quiet: Suppress progress messages
    """
    # Derive the image filename from the recording filename.
    if output_file.endswith(".sigmf-data"):
        stem = output_file.replace(".sigmf-data", "")
    else:
        stem = os.path.splitext(output_file)[0]
    image_path = stem + ".png"
    try:
        echo_progress(f"Generating visualization: {image_path}", quiet)
        view_simple_sig(recording, output_path=image_path, saveplot=True, fast_mode=False, labels_mode=True)
    except ImportError as e:
        click.echo(click.style("Warning: ", fg="yellow") + f"Could not save visualization: {e}", err=True)
    except Exception as e:
        click.echo(click.style("Warning: ", fg="yellow") + f"Failed to save visualization: {e}", err=True)
def select_params(device, sample_rate, gain, bandwidth, quiet, verbose):
    """Fill in device-specific defaults for any capture parameter left unset.

    Args:
        device: Device type string, or None to auto-detect a connected device.
        sample_rate: Sample rate in Hz, or None for the device default.
        gain: RX gain in dB, or None for the device default.
        bandwidth: Bandwidth in Hz, or None for the device default.
        quiet: Suppress auto-detection warnings.
        verbose: Emit extra messages about chosen defaults.

    Returns:
        Tuple of (device, sample_rate, gain, bandwidth).
    """
    # Auto-select device if not specified
    if device is None:
        device = auto_select_device(quiet)
    if sample_rate is None:
        # Sample rate defaults based on signal-testbed hardware limits
        sample_rate = {
            "rtlsdr": 2.4e6,  # RTL-SDR max is 3.2 MHz, use 2.4 MHz safe default
            "thinkrf": 31.25e6,  # ThinkRF decimation 4 (from 125 MS/s)
            "pluto": 20e6,  # PlutoSDR up to 61 MHz, 20 MHz safe
            "hackrf": 20e6,  # HackRF up to 20 MHz
            "bladerf": 40e6,  # BladeRF up to 61 MHz, 40 MHz safe
            "usrp": 50e6,  # USRP up to 200 MHz, 50 MHz default from signal-testbed
        }.get(device, 20e6)
    if gain is None:
        # RX gain defaults (matching signal-testbed's 32 dB baseline, adjusted per device)
        gain = {
            "pluto": 32,
            "hackrf": 32,
            "bladerf": 32,
            "usrp": 32,
            "rtlsdr": 32,  # RTL-SDR will auto-select closest valid gain
            "thinkrf": 0,  # ThinkRF uses attenuation, 0 = no attenuation
        }.get(device, 32)
        echo_verbose(f"Using default RX gain: {gain} dB for {device}", verbose)
    if bandwidth is None:
        # Bandwidth defaults (match sample rate for most devices)
        bandwidth = {
            "rtlsdr": None,  # RTL-SDR doesn't support bandwidth setting
            "thinkrf": None,  # ThinkRF manages bandwidth internally
            "pluto": sample_rate,
            "hackrf": sample_rate,
            "bladerf": sample_rate,
            "usrp": sample_rate,
        }.get(device)
    return device, sample_rate, gain, bandwidth
def determine_output_format(output, output_format, output_dir):
    """Resolve the output format, filename, and directory for a capture.

    Args:
        output: User-supplied output path, or None/empty for auto-generated naming.
        output_format: Explicit format name, or None to infer from the extension.
        output_dir: Default output directory; any directory component of
            ``output`` overrides it.

    Returns:
        Tuple of (output_format, output_filename, output_dir). The filename is
        None when the save functions should auto-generate one from the
        timestamp and rec_id.
    """
    if not output:
        # Auto-generated filename; SigMF is the default format.
        return (output_format or "sigmf"), None, output_dir
    if output_format is None:
        # Auto-detect format from the extension, defaulting to SigMF.
        extension = os.path.splitext(output)[1].lower().lstrip(".")
        format_by_ext = {"sigmf": "sigmf", "sigmf-data": "sigmf", "npy": "npy", "wav": "wav", "blue": "blue"}
        output_format = format_by_ext.get(extension, "sigmf")
    # A directory component in the provided path overrides output_dir.
    directory = os.path.dirname(output)
    if directory:
        output_dir = directory
    output_filename = os.path.basename(output)
    if output_format == "sigmf":
        # to_sigmf appends its own extensions, so strip any the user supplied.
        output_filename = output_filename.replace(".sigmf-data", "").replace(".sigmf", "")
    return output_format, output_filename, output_dir
# ============================================================================
# Main command
# ============================================================================
@click.command()
@click.option(
    "--device",
    "-d",
    type=click.Choice(["pluto", "hackrf", "bladerf", "usrp", "rtlsdr", "thinkrf"]),
    help="Device type",
)
@click.option("--ident", "-i", help="Device identifier (IP address or name=value, e.g., 192.168.2.1 or name=mypluto)")
@click.option(
    "--config", "-c", "config_file", type=click.Path(exists=True), help="Load parameters from YAML config file"
)
@click.option(
    "--sample-rate", "-s", type=float, default=None, help="Sample rate in Hz (e.g., 2e6) [default: device-specific]"
)
@click.option(
    "--center-frequency",
    "-f",
    type=str,
    default="2440M",
    show_default=True,
    help="Center frequency (e.g., 915e6, 2.4G)",
)
@click.option("--gain", "-g", type=float, help="RX gain in dB [default: device-specific]")
@click.option("--bandwidth", "-b", type=float, help="Bandwidth in Hz (if supported) [default: device-specific]")
@click.option("--num-samples", "-n", type=int, show_default=True, help="Number of samples to capture")
@click.option("--duration", "-t", type=float, help="Duration in seconds (alternative to --num-samples)")
@click.option("--output", "-o", help="Output filename (defaults to auto-generated with timestamp)")
@click.option("--output-dir", default="recordings", help="Output directory (default: recordings/)")
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Output format (default: sigmf)",
)
@click.option("--save-image", is_flag=True, help="Save visualization PNG alongside recording")
@click.option("--metadata", "-m", multiple=True, help="Add custom metadata (KEY=VALUE)")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress progress output")
def capture(
    device,
    ident,
    config_file,
    sample_rate,
    center_frequency,
    gain,
    bandwidth,
    num_samples,
    duration,
    output,
    output_dir,
    output_format,
    save_image,
    metadata,
    verbose,
    quiet,
):
    """Capture IQ samples from SDR device and save to file.

    \b
    Examples:
        ria capture -d hackrf -s 2e6 -f 2.44e6 -b 2e6
        ria capture -d pluto -s 1e6 -f 2e9 -b 2e6 -n 50
    """
    # Load config file if specified
    config = {}
    if config_file:
        config = load_yaml_config(config_file)
        echo_verbose(f"Loaded config from: {config_file}", verbose)
    # Command-line args override config file. Use explicit `is None` checks so
    # that falsy-but-valid CLI values (e.g. `--gain 0`, the documented default
    # for ThinkRF) are not silently replaced by config-file entries.
    if device is None:
        device = config.get("device")
    if ident is None:
        ident = config.get("ident") or config.get("serial")  # Support legacy 'serial' in config
    if sample_rate is None:
        sample_rate = config.get("sample_rate")
    if center_frequency is None:
        center_frequency = config.get("center_frequency")
    if gain is None:
        gain = config.get("gain")
    if bandwidth is None:
        bandwidth = config.get("bandwidth")
    if num_samples is None:
        num_samples = config.get("num_samples")
    if duration is None:
        duration = config.get("duration")
    if output is None:
        output = config.get("output")
    if output_format is None:
        output_format = config.get("format")
    # Parse metadata (user config defaults < config file < CLI flags)
    metadata_dict = get_metadata_dict(config=config, metadata=metadata)
    # Fill in device-specific defaults for anything still unset
    device, sample_rate, gain, bandwidth = select_params(
        device=device, sample_rate=sample_rate, gain=gain, bandwidth=bandwidth, quiet=quiet, verbose=verbose
    )
    # Parse frequency shorthand (e.g. "2440M") into Hz
    center_freq_hz = parse_frequency(center_frequency)
    # Calculate num_samples from duration if needed
    if duration is not None and num_samples is None:
        num_samples = int(duration * sample_rate)
        echo_verbose(f"Duration {duration}s = {num_samples} samples at {format_sample_rate(sample_rate)}", verbose)
    elif duration is None and num_samples is None:
        raise click.ClickException("Must provide either --num-samples or --duration")
    # Show capture parameters
    echo_progress(f"Capturing from {device.upper()}...", quiet)
    echo_progress(f"Sample rate: {format_sample_rate(sample_rate)}", quiet)
    echo_progress(f"Center frequency: {format_frequency(center_freq_hz)}", quiet)
    if gain is not None:
        echo_progress(f"Gain: {gain} dB", quiet)
    if bandwidth is not None:
        echo_progress(f"Bandwidth: {format_sample_rate(bandwidth)}", quiet)
    # Initialize device
    echo_verbose("Initializing device...", verbose)
    sdr = get_sdr_device(device, ident)
    try:
        # Initialize RX with parameters
        echo_verbose("Initializing RX...", verbose)
        sdr.init_rx(
            sample_rate=sample_rate, center_frequency=center_freq_hz, gain=gain, channel=0  # Default to channel 0
        )
        # Set bandwidth if supported (after init_rx)
        if bandwidth is not None and hasattr(sdr, "set_rx_bandwidth"):
            sdr.set_rx_bandwidth(bandwidth)
        # Capture
        echo_progress(f"Capturing {num_samples} samples...", quiet)
        recording = sdr.record(num_samples=num_samples)
        echo_progress(
            f"Captured {recording.data.shape[1] if len(recording.data.shape) > 1 else len(recording.data)} samples",
            quiet,
        )
        # Add custom metadata to recording
        if metadata_dict:
            for key, value in metadata_dict.items():
                recording.update_metadata(key, value)
        output_format, output_filename, output_dir = determine_output_format(
            output=output, output_format=output_format, output_dir=output_dir
        )
        echo_progress(f"Saving to {output_format.upper()} format...", quiet)
        # Save recording (filenames with timestamp auto-generated if output_filename is None)
        # All to_* functions handle directory creation internally
        # Note: to_sigmf returns None, others return path
        if output_format == "sigmf":
            to_sigmf(recording, filename=output_filename, path=output_dir)
            # Build path manually since to_sigmf doesn't return it
            base_name = (
                os.path.splitext(output_filename)[0] if output_filename else generate_filename(recording=recording)
            )
            saved_path = os.path.join(output_dir, f"{base_name}.sigmf-data")
        elif output_format == "npy":
            saved_path = to_npy(recording, filename=output_filename, path=output_dir)
        elif output_format == "wav":
            saved_path = to_wav(recording, filename=output_filename, path=output_dir)
        elif output_format == "blue":
            saved_path = to_blue(recording, filename=output_filename, path=output_dir)
        echo_progress(f"Saved to: {saved_path}", quiet)
        # Save visualization if requested
        if save_image:
            save_visualization(recording, saved_path, quiet)
    finally:
        # Always release the device, even if capture or saving failed.
        echo_verbose("Closing device...", verbose)
        sdr.close()
    echo_progress("Capture complete!", quiet)

View File

@ -0,0 +1,494 @@
"""Combine command - Combine multiple recordings into a single file."""
import copy
import time
from pathlib import Path
import click
import numpy as np
from ria_toolkit_oss.datatypes import Recording
from ria_toolkit_oss.io import from_npy_legacy, load_recording
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
echo_progress,
echo_verbose,
format_sample_count,
save_recording,
)
def load_recording_list(inputs, legacy, verbose, quiet):
    """Load each input path into a Recording, failing fast on the first error.

    Args:
        inputs: Iterable of input file paths.
        legacy: Load inputs with the legacy NPY reader instead of the generic loader.
        verbose: Emit a per-file progress line.
        quiet: Currently unused; kept for signature parity with other helpers.

    Returns:
        List of Recording objects, in input order.

    Raises:
        click.ClickException: If any input fails to load.
    """
    loader = from_npy_legacy if legacy else load_recording
    recordings = []
    for raw_path in inputs:
        path = Path(raw_path)
        try:
            recording = loader(str(path))
            # Store original filename in metadata if not present
            if "original_file" not in recording._metadata:
                recording._metadata["original_file"] = path.name
            num_samples = recording.data.shape[1]
            echo_verbose(f" Loading {path.name} ({format_sample_count(num_samples)} samples)... Done", verbose)
            recordings.append(recording)
        except Exception as e:
            raise click.ClickException(f"Failed to load {path}: {e}")
    return recordings
def pad(recordings, max_len, verbose):
    """Zero-pad each recording at the end so every one has length ``max_len``.

    Recordings already at or beyond ``max_len`` are passed through unchanged.
    Returns a list of numpy arrays, one per input recording.
    """
    if verbose:
        click.echo(f"Aligning (zero-pad to {format_sample_count(max_len)} samples)...")
    aligned = []
    for i, rec in enumerate(recordings):
        current_len = rec.data.shape[1]
        if current_len >= max_len:
            aligned.append(rec.data)
            continue
        pad_width = max_len - current_len
        if verbose:
            click.echo(f" Recording {i+1}: +{format_sample_count(pad_width)} zeros at end")
        aligned.append(np.pad(rec.data, ((0, 0), (0, pad_width)), mode="constant"))
    return aligned
def pad_start(recordings, max_len, pad_start_sample, verbose):
    """Zero-pad shorter recordings so their data begins at ``pad_start_sample``.

    Raises:
        click.ClickException: If the offset plus a recording's length exceeds
            ``max_len``.
    """
    if verbose:
        click.echo(f"Aligning (pad-start at sample {format_sample_count(pad_start_sample)})...")
    aligned = []
    for i, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            aligned.append(rec.data)
            continue
        before = pad_start_sample
        after = max_len - length - before
        if after < 0:
            raise click.ClickException(
                f"Invalid --pad-start-sample\n"
                f"Start sample {format_sample_count(pad_start_sample)} with recording length "
                f"{format_sample_count(rec.data.shape[1])} exceeds max length {format_sample_count(max_len)}"
            )
        if verbose:
            click.echo(
                f" Recording {i+1}: +{format_sample_count(before)} zeros before, "
                f"+{format_sample_count(after)} zeros after"
            )
        aligned.append(np.pad(rec.data, ((0, 0), (before, after)), mode="constant"))
    return aligned
def pad_center(recordings, max_len, verbose):
    """Center each shorter recording inside ``max_len`` samples of zeros.

    When the padding is odd, the extra zero goes after the data.
    """
    if verbose:
        click.echo(f"Aligning (pad-center in {format_sample_count(max_len)} samples)...")
    aligned = []
    for i, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            aligned.append(rec.data)
            continue
        shortfall = max_len - length
        before = shortfall // 2
        after = shortfall - before
        if verbose:
            click.echo(
                f" Recording {i+1}: +{format_sample_count(before)} zeros before, "
                f"+{format_sample_count(after)} zeros after"
            )
        aligned.append(np.pad(rec.data, ((0, 0), (before, after)), mode="constant"))
    return aligned
def pad_end(recordings, max_len, verbose):
    """Zero-pad each shorter recording at the beginning so all recordings end together."""
    if verbose:
        click.echo(f"Aligning (pad-end, align to {format_sample_count(max_len)} samples)...")
    aligned = []
    for i, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            aligned.append(rec.data)
            continue
        pad_width = max_len - length
        if verbose:
            click.echo(f" Recording {i+1}: +{format_sample_count(pad_width)} zeros at beginning")
        aligned.append(np.pad(rec.data, ((0, 0), (pad_width, 0)), mode="constant"))
    return aligned
def repeat(recordings, max_len, verbose):
    """Tile each shorter recording back-to-back, then truncate to ``max_len`` samples."""
    if verbose:
        click.echo(f"Aligning (repeat pattern to match {format_sample_count(max_len)} samples)...")
    aligned = []
    for i, rec in enumerate(recordings):
        length = rec.data.shape[1]
        if length >= max_len:
            aligned.append(rec.data)
            continue
        # Enough whole copies to cover max_len, then cut to exact length.
        n_repeats = -(-max_len // length)
        tiled = np.tile(rec.data, (1, n_repeats))[:, :max_len]
        if verbose:
            click.echo(
                f" Recording {i+1}: repeated {n_repeats} times, "
                f"truncated to {format_sample_count(max_len)} samples"
            )
        aligned.append(tiled)
    return aligned
def repeat_spaced(recordings, max_len, repeat_spacing, verbose):
    """Repeat each shorter recording with ``repeat_spacing`` zero samples between copies.

    The final copy may be truncated to fit exactly within ``max_len``.

    Raises:
        click.ClickException: If ``repeat_spacing`` is not positive.
    """
    if repeat_spacing <= 0:
        raise click.ClickException("Error: --align-mode repeat-spaced requires --repeat-spacing SAMPLES (must be > 0)")
    if verbose:
        click.echo(f"Aligning (repeat with {format_sample_count(repeat_spacing)} sample spacing)...")
    aligned = []
    for i, rec in enumerate(recordings):
        pattern = rec.data
        pattern_len = pattern.shape[1]
        if pattern_len >= max_len:
            aligned.append(pattern)
            continue
        canvas = np.zeros((pattern.shape[0], max_len), dtype=pattern.dtype)
        repetitions = 0
        cursor = 0
        while cursor < max_len:
            stop = min(cursor + pattern_len, max_len)
            canvas[:, cursor:stop] = pattern[:, : stop - cursor]
            repetitions += 1
            cursor = stop + repeat_spacing
        if verbose:
            click.echo(
                f" Recording {i+1}: {repetitions} repetitions "
                f"({format_sample_count(pattern_len)} samples + {format_sample_count(repeat_spacing)} spacing)"
            )
        aligned.append(canvas)
    return aligned
def align_for_add(recordings, align_mode, pad_start_sample=0, repeat_spacing=0, verbose=False):
    """Align recordings for add mode based on alignment strategy.

    Args:
        recordings: List of Recording objects
        align_mode: Alignment mode string
        pad_start_sample: Sample offset for pad-start mode
        repeat_spacing: Spacing between repetitions for repeat-spaced mode
        verbose: Verbose output

    Returns:
        List of aligned numpy arrays

    Raises:
        click.ClickException: If alignment fails or is invalid
    """
    lengths = [rec.data.shape[1] for rec in recordings]
    max_len = max(lengths)
    min_len = min(lengths)
    # All same length, no alignment needed
    if len(set(lengths)) == 1:
        if verbose:
            click.echo(f" All recordings same length ({format_sample_count(max_len)} samples)")
        return [rec.data for rec in recordings]
    if align_mode == "error":
        # Name the loop variable `length`, not `len`, to avoid shadowing the
        # builtin used just above.
        formatted_lengths = [format_sample_count(length) for length in lengths]
        raise click.ClickException(
            f"Recordings have different lengths: {formatted_lengths}\n"
            f"Use --align-mode to specify alignment strategy:\n"
            f" --align-mode truncate (use shortest: {format_sample_count(min_len)} samples)\n"
            f" --align-mode pad (zero-pad to longest: {format_sample_count(max_len)} samples)\n"
            f" --align-mode pad-center (center shorter in longer)\n"
            f" --align-mode pad-end (align end of recordings)\n"
            f" --align-mode repeat (repeat shorter to match longest)"
        )
    elif align_mode == "truncate":
        if verbose:
            click.echo(f"Aligning (truncate to {format_sample_count(min_len)} samples)...")
            # Per-recording details are verbose-only, matching the other modes.
            for i, rec in enumerate(recordings):
                if rec.data.shape[1] > min_len:
                    click.echo(f" Recording {i+1}: truncated from {format_sample_count(rec.data.shape[1])} samples")
        return [rec.data[:, :min_len] for rec in recordings]
    elif align_mode == "pad":
        return pad(recordings, max_len, verbose)
    elif align_mode == "pad-start":
        return pad_start(recordings, max_len, pad_start_sample, verbose)
    elif align_mode == "pad-center":
        return pad_center(recordings, max_len, verbose)
    elif align_mode == "pad-end":
        return pad_end(recordings, max_len, verbose)
    elif align_mode == "repeat":
        return repeat(recordings, max_len, verbose)
    elif align_mode == "repeat-spaced":
        return repeat_spaced(recordings, max_len, repeat_spacing, verbose)
    else:
        raise click.ClickException(f"Unknown alignment mode: {align_mode}")
def concat_recordings(recordings, verbose=False):
    """Concatenate recordings end-to-end.

    Annotation sample indices are shifted by the cumulative length of all
    preceding recordings. Metadata is taken from the first recording, with
    provenance fields added.

    Args:
        recordings: List of Recording objects
        verbose: Verbose output

    Returns:
        Recording: Combined recording
    """
    if verbose:
        click.echo("Concatenating...")
    combined_data = np.concatenate([rec.data for rec in recordings], axis=1)
    # Merge annotations, shifting each by the start offset of its source.
    merged_annotations = []
    offset = 0
    for rec in recordings:
        for annotation in rec._annotations:
            shifted = copy.deepcopy(annotation)
            shifted.sample_start += offset
            merged_annotations.append(shifted)
        offset += rec.data.shape[1]
    # Metadata comes from the first recording, plus provenance fields.
    merged_metadata = recordings[0]._metadata.copy()
    merged_metadata["combined_from"] = [rec._metadata.get("original_file", "unknown") for rec in recordings]
    merged_metadata["combine_mode"] = "concat"
    merged_metadata["num_inputs"] = len(recordings)
    merged_metadata["combine_timestamp"] = time.time()
    result = Recording(data=combined_data, metadata=merged_metadata)
    result._annotations = merged_annotations
    if verbose:
        click.echo(f"Total: {format_sample_count(combined_data.shape[1])} samples")
    return result
def add_recordings(recordings, align_mode="error", pad_start_sample=0, repeat_spacing=0, verbose=False):
    """Add/mix recordings sample-by-sample.

    Args:
        recordings: List of Recording objects
        align_mode: Alignment mode for different-length recordings
        pad_start_sample: Sample offset for pad-start mode
        repeat_spacing: Spacing for repeat-spaced mode
        verbose: Verbose output

    Returns:
        Recording: Combined recording
    """
    # Bring every recording to the same length first.
    aligned_data = align_for_add(
        recordings, align_mode, pad_start_sample=pad_start_sample, repeat_spacing=repeat_spacing, verbose=verbose
    )
    if verbose:
        click.echo("Adding signals...")
    mixed = sum(aligned_data)
    # Metadata comes from the first recording, plus provenance fields.
    merged_metadata = recordings[0]._metadata.copy()
    merged_metadata["combined_from"] = [rec._metadata.get("original_file", "unknown") for rec in recordings]
    merged_metadata["combine_mode"] = "add"
    merged_metadata["align_mode"] = align_mode
    merged_metadata["num_inputs"] = len(recordings)
    merged_metadata["combine_timestamp"] = time.time()
    # Only the first recording's annotations survive an add; warn if any of the
    # others carried annotations that are being dropped.
    if any(rec._annotations for rec in recordings[1:]):
        click.echo("Warning: Only first recording's annotations preserved (others discarded in add mode)", err=True)
    result = Recording(data=mixed, metadata=merged_metadata)
    result._annotations = recordings[0]._annotations.copy()
    if verbose:
        click.echo(f"Total: {format_sample_count(mixed.shape[1])} samples")
    return result
@click.command()
@click.argument("inputs", nargs=-1, required=True, type=click.Path(exists=True))
@click.argument("output", nargs=1, required=True, type=click.Path())
@click.option(
    "--mode",
    type=click.Choice(["concat", "add"], case_sensitive=False),
    default="concat",
    help="Combination mode (default: concat)",
)
@click.option(
    "--align-mode",
    type=click.Choice(
        ["error", "truncate", "pad", "pad-start", "pad-center", "pad-end", "repeat", "repeat-spaced"],
        case_sensitive=False,
    ),
    default="error",
    help="Add mode alignment strategy (default: error)",
)
@click.option("--pad-start-sample", type=int, default=0, metavar="N", help="Sample offset for pad-start mode")
@click.option(
    "--repeat-spacing",
    type=int,
    default=0,
    metavar="SAMPLES",
    help="Spacing between repetitions for repeat-spaced mode",
)
@click.option("--legacy", is_flag=True, help="Load inputs as legacy NPY format")
@click.option("--normalize", is_flag=True, help="Normalize after combining")
@click.option(
    "--output-format",
    type=click.Choice(["sigmf", "npy", "wav", "blue"], case_sensitive=False),
    help="Force output format",
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing output file")
@click.option(
    "--metadata", multiple=True, metavar="KEY=VALUE", help="Add custom metadata (can be used multiple times)"
)
@click.option("--verbose", is_flag=True, help="Verbose output")
@click.option("--quiet", is_flag=True, help="Suppress output")
def combine(
    inputs,
    output,
    mode,
    align_mode,
    pad_start_sample,
    repeat_spacing,
    legacy,
    normalize,
    output_format,
    overwrite,
    metadata,
    verbose,
    quiet,
):
    """Combine multiple recordings into a single file.

    \b
    INPUTS Input recording files (2 or more)
    OUTPUT Output filename

    \b
    Modes:
    concat Concatenate recordings end-to-end (default)
    add Add signals sample-by-sample (mix/superimpose)

    \b
    Examples:
    # Concatenate recordings
    ria combine chunk1.npy chunk2.npy chunk3.npy full.npy

    \b
    # Add signal and noise
    ria combine signal.npy noise.npy noisy.npy --mode add

    \b
    # Add with center alignment
    ria combine long.npy short.npy output.npy --mode add --align-mode pad-center

    \b
    # Repeat pattern with spacing
    ria combine signal.npy pattern.npy output.npy --mode add --align-mode repeat-spaced --repeat-spacing 10000
    """
    # Validate inputs: combining requires at least two recordings. (This makes
    # any "single input" handling below unnecessary; a former dead branch for
    # len(inputs) == 1 has been removed.)
    if len(inputs) < 2:
        raise click.ClickException(
            "Error: At least 2 input files required\n" "Usage: ria combine INPUT1 INPUT2 [INPUT3 ...] OUTPUT"
        )
    mode = mode.lower()
    align_mode = align_mode.lower()
    # Load recordings
    align_str = ", " + align_mode + " alignment" if mode == "add" and align_mode != "error" else ""
    echo_progress(
        f"Combining {len(inputs)} recordings ({mode} mode{align_str})...",
        quiet,
    )
    recordings = load_recording_list(inputs, legacy, verbose, quiet)
    # Validate for empty recordings
    for i, rec in enumerate(recordings):
        if rec.data.shape[1] == 0:
            raise click.ClickException(
                f"Error: Input file '{inputs[i]}' has 0 samples\n" "Cannot combine empty recordings"
            )
    # Validate for add mode
    if mode == "add":
        # Check sample rates match
        sample_rates = [rec._metadata.get("sample_rate") for rec in recordings]
        sample_rates = [sr for sr in sample_rates if sr is not None]
        if len(sample_rates) > 1 and len(set(sample_rates)) > 1:
            raise click.ClickException(
                f"Error: Recordings have different sample rates (add mode)\n"
                f"Sample rates: {sample_rates}\n"
                "All recordings must have matching sample rates for add mode"
            )
        # Check channel counts match
        channel_counts = [rec.data.shape[0] for rec in recordings]
        if len(set(channel_counts)) > 1:
            raise click.ClickException(
                f"Error: Recordings have different channel counts\n"
                f"Channels: {channel_counts}\n"
                "All recordings must have same number of channels"
            )
    # Combine recordings
    if mode == "concat":
        combined = concat_recordings(recordings, verbose=verbose)
    elif mode == "add":
        combined = add_recordings(
            recordings,
            align_mode=align_mode,
            pad_start_sample=pad_start_sample,
            repeat_spacing=repeat_spacing,
            verbose=verbose,
        )
    else:
        raise click.ClickException(f"Unknown mode: {mode}")
    # Add custom metadata
    for meta_item in metadata:
        if "=" not in meta_item:
            raise click.ClickException(f"Invalid metadata format: {meta_item} (expected KEY=VALUE)")
        key, value = meta_item.split("=", 1)
        combined.update_metadata(key, value)
    # Normalize if requested
    if normalize:
        echo_verbose("Normalizing...", verbose)
        combined = combined.normalize()
        combined.update_metadata("normalized", True)
    # Save output
    try:
        save_recording(combined, output, output_format=output_format, overwrite=overwrite, verbose=verbose)
        echo_progress(f"Saved to: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")
if __name__ == "__main__":
combine()

View File

@ -0,0 +1,25 @@
# flake8: noqa: F401
"""
This module contains all the CLI bindings for the ria package.
"""
from .capture import capture
from .combine import combine
from .convert import convert
# Import all command functions
from .discover import discover
from .generate import generate
# from .generate import generate
from .init import init
from .split import split
from .transform import transform
from .transmit import transmit
from .view import view
# Aliases
synth = generate
# All commands will be automatically registered by cli.py
# Commands must be click.Command instances

View File

@ -0,0 +1,408 @@
"""Common utilities for CLI commands."""
import os
from pathlib import Path
from typing import Any, Dict, List, Optional
import click
import yaml
from ria_toolkit_oss.datatypes.recording import Recording
from ria_toolkit_oss.io.recording import to_blue, to_npy, to_sigmf, to_wav
def load_yaml_config(config_file: str) -> Dict[str, Any]:
    """Read a YAML configuration file into a dictionary.

    Args:
        config_file: Path to YAML file

    Returns:
        Dictionary of configuration parameters (empty dict for an empty file)

    Raises:
        click.ClickException: If the file is missing or contains invalid YAML
    """
    try:
        with open(config_file, "r") as handle:
            parsed = yaml.safe_load(handle)
    except FileNotFoundError:
        raise click.ClickException(f"Config file not found: {config_file}")
    except yaml.YAMLError as e:
        raise click.ClickException(f"Error parsing YAML config: {e}")
    # An empty file parses to None; normalize that to an empty dict.
    return parsed or {}
def detect_file_format(filepath):
    """Infer the recording file format from a path's extension.

    Args:
        filepath: Path to file

    Returns:
        str: Format name ('sigmf', 'npy', 'wav', 'blue')

    Raises:
        click.ClickException: If the extension is not recognized
    """
    filepath = Path(filepath)
    # Map every recognized extension onto its canonical format name.
    extension_to_format = {
        ".sigmf": "sigmf",
        ".sigmf-data": "sigmf",
        ".sigmf-meta": "sigmf",
        ".npy": "npy",
        ".wav": "wav",
        ".blue": "blue",
    }
    detected = extension_to_format.get(filepath.suffix.lower())
    if detected is None:
        raise click.ClickException(
            f"Unknown format for '{filepath}'\n" f"Supported extensions: .sigmf, .npy, .wav, .blue"
        )
    return detected
def parse_metadata_args(metadata_args: List[str]) -> Dict[str, Any]:
    """Parse a list of "KEY=VALUE" metadata arguments into a dictionary.

    Values for the reserved keys ``experiment``, ``campaign`` and ``project``
    are always kept as strings; other values are parsed as int or float when
    they look numeric.

    Args:
        metadata_args: List of "KEY=VALUE" strings

    Returns:
        Dictionary of parsed metadata

    Raises:
        click.ClickException: If an argument is not in KEY=VALUE form
    """
    parsed: Dict[str, Any] = {}
    for item in metadata_args:
        if "=" not in item:
            raise click.ClickException(f"Invalid metadata format: '{item}'. Expected KEY=VALUE")
        key, value = item.split("=", 1)
        if key in ("experiment", "campaign", "project"):
            # Reserved keys stay as strings even when they look numeric.
            parsed[key] = value
            continue
        try:
            # Decimal point or exponent marks a float; otherwise try int.
            parsed[key] = float(value) if ("." in value or "e" in value.lower()) else int(value)
        except ValueError:
            parsed[key] = value
    return parsed
def parse_frequency(freq_str: str) -> float:
    """Parse frequency string with suffixes (k, M, G).

    Args:
        freq_str: Frequency string (e.g., "915e6", "2.4G", "433M")

    Returns:
        Frequency in Hz

    Raises:
        click.ClickException: If frequency format is invalid
    """
    try:
        # Plain integers / scientific notation parse directly as floats.
        bare_number = freq_str.replace(".", "").replace("-", "").isdigit()
        if "e" in freq_str.lower() or bare_number:
            return float(freq_str)
        # Unit-suffix notation: k/K = kHz, M = MHz, G = GHz.
        for suffix, multiplier in (("k", 1e3), ("K", 1e3), ("M", 1e6), ("G", 1e9)):
            if freq_str.endswith(suffix):
                return float(freq_str[:-1]) * multiplier
        # No recognized suffix: last chance as a plain number.
        return float(freq_str)
    except ValueError:
        raise click.ClickException(
            f"Invalid frequency format: '{freq_str}'. " "Use formats like: 915e6, 2.4G, 433M, 100k"
        )
def format_frequency(freq_hz: float) -> str:
    """Format frequency in human-readable form.

    Args:
        freq_hz: Frequency in Hz

    Returns:
        Formatted string (e.g., "915.00 MHz")
    """
    # Pick the largest unit whose threshold the value reaches.
    for threshold, unit in ((1e9, "GHz"), (1e6, "MHz"), (1e3, "kHz")):
        if freq_hz >= threshold:
            return f"{freq_hz / threshold:.2f} {unit}"
    return f"{freq_hz:.2f} Hz"
def format_sample_rate(rate_hz: float) -> str:
    """Format sample rate in human-readable form.

    Args:
        rate_hz: Sample rate in Hz

    Returns:
        Formatted string (e.g., "2.00 MS/s")
    """
    if rate_hz >= 1e6:
        divisor, unit = 1e6, "MS/s"
    elif rate_hz >= 1e3:
        divisor, unit = 1e3, "kS/s"
    else:
        divisor, unit = 1.0, "S/s"
    return f"{rate_hz / divisor:.2f} {unit}"
def format_sample_count(count):
    """Return the sample count as a string with thousands separators."""
    return "{:,}".format(count)
def get_output_path(filename: Optional[str], path: Optional[str], default_dir: str = "recordings") -> str:
    """Generate full output path, creating the directory if needed.

    Args:
        filename: Output filename (None to get just the directory back)
        path: Output directory path
        default_dir: Default directory if path not specified

    Returns:
        Full path for output file, or the directory itself when filename
        is None
    """
    target_dir = default_dir if path is None else path
    # Make sure the directory exists before handing back a path into it.
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    return os.path.join(target_dir, filename) if filename else target_dir
def save_recording(recording: Recording, output_path=None, output_format=None, overwrite=False, verbose=False):
    """Save recording to file with format-specific handling.

    Args:
        recording: Recording object to save
        output_path: Output file path; auto-generated from the recording's
            signal type, id and timestamp when None
        output_format: Optional format override ('sigmf', 'npy', 'wav',
            'blue'); detected from the file extension when None
        overwrite: Whether to overwrite existing files
        verbose: Verbose output

    Raises:
        click.ClickException: If the format is unsupported, output files
            already exist, or the underlying writer fails
    """
    if output_path is None:
        # Auto-generate a filename from the recording's identity metadata.
        timestamp = recording.timestamp
        rec_id = recording.rec_id[:8]
        signal_type = recording.metadata.get("signal_type", "signal")
        output_path = f"{signal_type}_{rec_id}_{int(timestamp)}"
    output_path = Path(output_path)
    # Detect format from the extension if not specified explicitly.
    if output_format is None:
        output_format = detect_file_format(output_path)
    # For sigmf, keep the whole name when there is no SigMF extension to
    # strip; otherwise drop it so the writer can append .sigmf-data/-meta.
    if output_format == "sigmf" and output_path.suffix not in [".sigmf-data", ".sigmf-meta", ".sigmf"]:
        base_name = output_path.name
    else:
        base_name = output_path.stem
    output_dir = output_path.parent
    # Create output directory if needed
    if output_dir and not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
        echo_verbose(f"Created directory: {output_dir}", verbose)
    # Refuse to clobber existing files unless overwrite was requested.
    check_for_overwriting(overwrite, output_format, output_path)
    # Save based on format
    try:
        if output_format == "sigmf":
            to_sigmf(recording, filename=base_name, path=str(output_dir), overwrite=overwrite)
        elif output_format == "npy":
            to_npy(recording, filename=str(output_path), overwrite=overwrite)
        elif output_format == "wav":
            to_wav(recording, filename=str(output_path), overwrite=overwrite)
        elif output_format == "blue":
            to_blue(recording, filename=str(output_path), overwrite=overwrite)
        else:
            raise click.ClickException(f"Unsupported output format: {output_format}")
    except click.ClickException:
        # Bug fix: intentional CLI errors (e.g. unsupported format) were
        # previously caught by the generic handler below and re-wrapped as
        # "Failed to save output"; let them propagate verbatim.
        raise
    except Exception as e:
        raise click.ClickException(f"Failed to save output: {e}")
def echo_verbose(message: str, verbose: bool):
    """Emit a message via click only when verbose mode is enabled.

    Args:
        message: Message to print
        verbose: Whether verbose mode is enabled
    """
    if not verbose:
        return
    click.echo(message)
def echo_progress(message: str, quiet: bool = False):
    """Write a progress message to stderr unless quiet mode is enabled.

    Args:
        message: Progress message
        quiet: Whether quiet mode is enabled
    """
    if quiet:
        return
    # Progress goes to stderr so stdout stays clean for piped data.
    click.echo(message, err=True)
def confirm_dangerous_operation(message: str, skip_confirm: bool = False) -> bool:
    """Ask for confirmation of potentially dangerous operation.

    Args:
        message: Warning message
        skip_confirm: Skip confirmation (for automation)

    Returns:
        True if user confirmed, False otherwise
    """
    if not skip_confirm:
        # Highlight the warning on stderr, then prompt (defaults to "no").
        click.echo(click.style("WARNING: ", fg="yellow", bold=True) + message, err=True)
        return click.confirm("Continue?", default=False)
    return True
def check_for_overwriting(overwrite, output_format, output_path):
    """Raise if the output file(s) already exist and overwriting is disabled.

    Args:
        overwrite: If True, skip all existence checks.
        output_format: Output format name; 'sigmf' is checked as a
            .sigmf-data / .sigmf-meta file pair.
        output_path: Output path (str or Path).

    Raises:
        click.ClickException: If an output file already exists.
    """
    if overwrite:
        return
    output_path = Path(output_path)
    if output_format == "sigmf":
        # SigMF writes two sibling files; refuse if either is present.
        data_file = output_path.with_suffix(".sigmf-data")
        meta_file = output_path.with_suffix(".sigmf-meta")
        if data_file.exists() or meta_file.exists():
            raise click.ClickException(
                f"Output files exist: {data_file.name}, {meta_file.name}\n" f"Use --overwrite to replace"
            )
    elif output_path.exists():
        raise click.ClickException(f"Output file '{output_path}' already exists\n" f"Use --overwrite to replace")
def parse_ident(ident: Optional[str]) -> tuple[Optional[str], Optional[str]]:
    """
    Parse device identifier into IP address or name.

    Args:
        ident: Device identifier (IP address or name=value)

    Returns:
        Tuple of (ip_address, name) where one will be None
    """
    if not ident:
        return None, None
    if "=" in ident:
        key, value = ident.split("=", 1)
        if key.lower() == "name":
            return None, value
    # Anything else — including unrecognized key=value pairs — is treated
    # as an address-style identifier and passed through untouched.
    return ident, None
def get_sdr_device(device_type: str, ident: Optional[str] = None, tx=False):
    """
    Get TX-capable SDR device instance.

    Args:
        device_type: Type of device (pluto, hackrf, bladerf, usrp, rtlsdr, thinkrf)
        ident: Device identifier (IP address or name=value)
        tx: If True, require a device type that supports transmission

    Returns:
        SDR device instance

    Raises:
        click.ClickException: If device cannot be initialized or doesn't support TX
    """
    # rtlsdr and thinkrf are receive-only; reject them when TX is requested.
    TX_CAPABLE_DEVICES = ["pluto", "hackrf", "bladerf", "usrp"]
    if tx and device_type not in TX_CAPABLE_DEVICES:
        raise click.ClickException(
            f"Device '{device_type}' does not support transmission (RX only)\n"
            f"TX-capable devices: {', '.join(TX_CAPABLE_DEVICES)}"
        )
    ip_addr, name = parse_ident(ident)
    # Driver modules are imported lazily so a missing dependency for one
    # device type does not break the others.
    try:
        if device_type == "pluto":
            from ria_toolkit_oss.sdr.pluto import Pluto

            if ip_addr:
                return Pluto(identifier=ip_addr)
            else:
                return Pluto()
        elif device_type == "hackrf":
            from ria_toolkit_oss.sdr.hackrf import HackRF

            return HackRF()
        elif device_type == "bladerf":
            from ria_toolkit_oss.sdr.blade import Blade

            return Blade()
        elif device_type == "usrp":
            from ria_toolkit_oss.sdr.usrp import USRP

            # USRP accepts either an addr= or name= identifier string.
            if ip_addr:
                return USRP(identifier=f"addr={ip_addr}")
            elif name:
                return USRP(identifier=f"name={name}")
            else:
                return USRP()
        elif device_type == "rtlsdr":
            from ria_toolkit_oss.sdr.rtlsdr import RTLSDR

            return RTLSDR()
        elif device_type == "thinkrf":
            from ria_toolkit_oss.sdr.thinkrf import ThinkRF

            if ip_addr:
                return ThinkRF(identifier=ip_addr)
            else:
                return ThinkRF()
        else:
            raise click.ClickException(f"Unknown device type: {device_type}")
    except click.ClickException:
        # Bug fix: the "Unknown device type" error was previously swallowed
        # by the generic handler below and re-reported as an initialization
        # failure; let intentional CLI errors propagate verbatim.
        raise
    except ImportError as e:
        raise click.ClickException(
            f"Failed to import {device_type} driver: {e}\n" f"Ensure required dependencies are installed"
        )
    except Exception as e:
        raise click.ClickException(f"Failed to initialize {device_type}: {e}")

View File

@ -0,0 +1,206 @@
"""Configuration file utilities for ria_toolkit_oss CLI.
This module provides utilities for managing the user configuration file.
The core integration (actually using these configs) is TODO for the core team.
"""
import os
from pathlib import Path
from typing import Optional
import yaml
def get_config_path(config_path: Optional[str] = None) -> Path:
    """Resolve the location of the user config file.

    Args:
        config_path: Optional custom config path

    Returns:
        Path to config file
    """
    if config_path:
        return Path(config_path)
    # Prefer the XDG base-directory standard (Linux) when configured.
    xdg_root = os.environ.get("XDG_CONFIG_HOME")
    if xdg_root:
        return Path(xdg_root) / "ria" / "config.yaml"
    # Otherwise fall back to a dotfile directory in the user's home.
    return Path.home() / ".ria" / "config.yaml"
def load_user_config(config_path: Optional[str] = None) -> Optional[dict]:
    """Load user configuration from file.

    Args:
        config_path: Optional custom config path

    Returns:
        Config dict if file exists, None otherwise

    Raises:
        ValueError: If the file exists but contains invalid YAML
        IOError: If the file exists but cannot be read
    """
    path = get_config_path(config_path)
    # A missing config file is not an error — it just means "no config".
    if not path.exists():
        return None
    try:
        with open(path, "r") as handle:
            loaded = yaml.safe_load(handle)
        return loaded if loaded else {}
    except yaml.YAMLError as e:
        raise ValueError(f"Invalid YAML in config file: {e}")
    except Exception as e:
        raise IOError(f"Error reading config file: {e}")
def save_user_config(config: dict, config_path: Optional[str] = None) -> Path:
    """Save user configuration to file.

    Args:
        config: Configuration dictionary
        config_path: Optional custom config path

    Returns:
        Path where config was saved
    """
    path = get_config_path(config_path)
    # Create parent directory if it doesn't exist
    path.parent.mkdir(parents=True, exist_ok=True)
    header = (
        "# Ria SDR CLI Configuration\n"
        "# Auto-generated by 'ria init'\n"
        "# Edit with 'ria init' or modify this file directly\n\n"
    )
    with open(path, "w") as handle:
        handle.write(header)
        yaml.dump(config, handle, default_flow_style=False, sort_keys=False)
    # Restrict the file to the owner; it may contain personal details.
    try:
        os.chmod(path, 0o600)
    except Exception:
        pass  # Best effort on Windows
    return path
def validate_config(config: dict) -> list[str]:
    """Validate configuration and return list of warnings.

    Args:
        config: Configuration dictionary

    Returns:
        List of warning messages (empty if no issues)
    """
    warnings = []
    # An empty/missing author is legal but worth flagging.
    if not config.get("author"):
        warnings.append("Author field is empty - consider setting your name")
    # Warn on unrecognized license identifiers (Proprietary is accepted
    # as a valid, if closed, choice).
    if "sigmf" in config and "license" in config["sigmf"]:
        license_id = config["sigmf"]["license"]
        common_licenses = (
            "Proprietary",
            "CC0-1.0",
            "CC-BY-4.0",
            "CC-BY-SA-4.0",
            "MIT",
            "Apache-2.0",
            "GPL-3.0",
            "BSD-3-Clause",
        )
        if license_id not in common_licenses:
            warnings.append(
                f"License '{license_id}' is not a common identifier. "
                f"Consider: Proprietary, CC-BY-4.0, MIT, or other SPDX identifier"
            )
    return warnings
def format_config_display(config: dict) -> str:
    """Format configuration for display.

    Args:
        config: Configuration dictionary

    Returns:
        Formatted string, one "Label: value" line per populated field
    """
    lines = []
    # Top-level metadata, in display order; falsy values are skipped.
    for key, label in (
        ("author", "Author"),
        ("organization", "Organization"),
        ("project", "Project"),
        ("location", "Location"),
        ("testbed", "Testbed"),
    ):
        if config.get(key):
            lines.append(f"{label}: {config[key]}")
    # SigMF-specific metadata lives in its own sub-dict.
    if "sigmf" in config:
        sigmf = config["sigmf"]
        for key, label in (("license", "License"), ("hw", "Hardware"), ("dataset", "Dataset")):
            if sigmf.get(key):
                lines.append(f"{label}: {sigmf[key]}")
    return "\n".join(lines) if lines else "(empty configuration)"
# TODO for core team: Integration functions
# These will be implemented when wiring config into core ria logic
def merge_config(user_config: dict, cli_args: dict) -> dict:
    """Merge configs with precedence: cli_args > user_config > defaults.

    TODO: Implement this when integrating with capture/convert/transmit commands.

    Args:
        user_config: User configuration from file
        cli_args: Arguments from CLI

    Returns:
        Merged configuration
    """
    # Placeholder implementation: CLI arguments that were actually provided
    # (i.e. not None) override values from the user's config file.
    result = dict(user_config)
    for key, value in cli_args.items():
        if value is not None:
            result[key] = value
    return result
def apply_config_to_metadata(metadata: dict, config: dict) -> dict:
    """Apply configuration defaults to recording metadata.

    TODO: Implement this in capture.py, convert.py when core team wires it in.

    Args:
        metadata: Existing metadata dict
        config: User configuration

    Returns:
        Updated metadata dict (existing metadata keys win over config)
    """
    # Placeholder implementation: only the standard provenance fields are
    # copied over, and only when the recording does not already set them.
    merged = dict(metadata)
    for field in ("author", "organization", "project", "location", "testbed"):
        if field in config and field not in merged:
            merged[field] = config[field]
    return merged

View File

@ -0,0 +1,303 @@
"""Convert command - Convert recordings between file formats."""
import os
from pathlib import Path
import click
from ria_toolkit_oss.io.recording import (
from_npy,
load_recording,
to_blue,
to_npy,
to_sigmf,
to_wav,
)
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
check_for_overwriting,
detect_file_format,
echo_progress,
echo_verbose,
format_sample_count,
)
from .config import load_user_config
def parse_metadata_override(metadata_str):
    """Parse KEY=VALUE metadata string.

    Args:
        metadata_str: String in format "key=value"

    Returns:
        tuple: (key, value) where value is converted to int or float when it
        looks numeric (decimal point or exponent notation), otherwise kept
        as a string

    Raises:
        click.BadParameter: If the string is not in KEY=VALUE format
    """
    if "=" not in metadata_str:
        raise click.BadParameter(f"Metadata must be in KEY=VALUE format, got: {metadata_str}")
    key, value = metadata_str.split("=", 1)
    # Numeric conversion mirrors common.parse_metadata_args for consistency:
    # values with a decimal point or exponent parse as float ("1e3" was
    # previously left as a string here), otherwise as int.
    try:
        if "." in value or "e" in value.lower():
            return (key, float(value))
        return (key, int(value))
    except ValueError:
        # Keep as string
        return (key, value)
@click.command()
@click.argument("input", type=click.Path(exists=True))
@click.argument("output", type=click.Path(), required=False)
@click.option(
    "--format",
    "output_format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Output format (required if OUTPUT not specified, otherwise auto-detected from extension)",
)
@click.option("--output-dir", type=click.Path(), help="Output directory (default: current directory)")
@click.option("--legacy", is_flag=True, help="Load input as legacy NPY format")
@click.option("--wav-sample-rate", type=float, default=48000, show_default=True, help="Target WAV sample rate in Hz")
@click.option(
    "--wav-bits", type=click.Choice(["16", "32"]), default="32", show_default=True, help="WAV bits per sample"
)
@click.option(
    "--blue-format",
    type=click.Choice(["CI", "CF", "CD"]),
    default="CI",
    show_default=True,
    help="MIDAS Blue format: CI (int16), CF (float32), CD (float64)",
)
@click.option("--overwrite", is_flag=True, help="Overwrite output if it exists")
@click.option("--metadata", multiple=True, help="Add/override metadata as KEY=VALUE (can be repeated)")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def convert(  # noqa: C901
    input,
    output,
    output_format,
    output_dir,
    legacy,
    wav_sample_rate,
    wav_bits,
    blue_format,
    overwrite,
    metadata,
    verbose,
    quiet,
):
    """Convert recordings between file formats.

    Automatically detects input format and converts to desired output format.
    Supports SigMF, NumPy (.npy), WAV IQ stereo, and MIDAS Blue formats.

    If OUTPUT is not specified, the input filename is used with a new extension
    based on the --format option.

    \b
    Examples:
        # SigMF to NumPy (explicit output)
        ria convert recording.sigmf-data output.npy
    \b
        # Auto-generate output filename
        ria convert recording.npy --format sigmf
    \b
        # Convert to specific directory
        ria convert long_path/recording.npy --format sigmf --output-dir converted
    \b
        # NumPy to WAV with decimation
        ria convert high_rate.npy audio.wav --wav-sample-rate 48000
    \b
        # Legacy NPY to SigMF
        ria convert old.npy --format sigmf --legacy --overwrite
    \b
        # Add metadata during conversion
        ria convert raw.npy --format sigmf --metadata "location=lab" --metadata "antenna=dipole"
    """
    # Generate output filename if not provided
    if output is None:
        if output_format is None:
            raise click.ClickException(
                "Either OUTPUT or --format must be specified\n"
                "Examples:\n"
                " ria convert input.npy output.sigmf\n"
                " ria convert input.npy --format sigmf"
            )
        # Get input filename without extension
        input_path = Path(input)
        input_stem = input_path.stem
        # For SigMF input, remove .sigmf-data or .sigmf-meta suffix
        # NOTE(review): .stem has already stripped the final suffix, so these
        # endswith checks only fire for doubled extensions — confirm intent.
        if input_stem.endswith(".sigmf-data") or input_stem.endswith(".sigmf-meta"):
            input_stem = input_stem[:-11]  # Remove '.sigmf-data'/'.sigmf-meta'
        elif input_stem.endswith(".sigmf"):
            input_stem = input_stem[:-6]  # Remove '.sigmf'
        # Determine output directory
        if output_dir:
            out_dir = Path(output_dir)
        else:
            out_dir = Path(".")  # Current directory
        # Generate output filename with new extension
        extension_map = {"sigmf": ".sigmf", "npy": ".npy", "wav": ".wav", "blue": ".blue"}
        output = str(out_dir / f"{input_stem}{extension_map[output_format]}")
        echo_verbose(f"Auto-generated output: {output}", verbose)
    # Detect input and output formats
    input_format = detect_file_format(input)
    if output_format is None:
        output_format = detect_file_format(output)
    # Check for overwriting
    output_path = Path(output)
    check_for_overwriting(overwrite, output_format, output_path)
    # NOTE(review): input and output names are concatenated with no visible
    # separator here — looks like an arrow character was lost; confirm.
    echo_progress(f"Converting: {os.path.basename(input)}{os.path.basename(output)}", quiet)
    echo_progress(f"Input format: {input_format.upper()}", quiet)
    echo_progress(f"Output format: {output_format.upper()}", quiet)
    # Load input recording
    echo_verbose("Reading input...", verbose)
    try:
        if legacy:
            echo_verbose("Using legacy NPY loader", verbose)
            recording = from_npy(input, legacy=True)
        else:
            recording = load_recording(input)
    except Exception as e:
        raise click.ClickException(f"Failed to load input file: {e}")
    # Get sample count; data may be 2-D (channels, samples), 1-D, or a
    # plain sequence without a .shape attribute.
    if hasattr(recording.data, "shape"):
        if len(recording.data.shape) == 2:
            num_samples = recording.data.shape[1]
            num_channels = recording.data.shape[0]
        else:
            num_samples = len(recording.data)
            num_channels = 1
    else:
        num_samples = len(recording.data)
        num_channels = 1
    echo_progress(f"Samples: {format_sample_count(num_samples)}", quiet)
    if num_channels > 1:
        echo_progress(f"Channels: {num_channels}", quiet)
    echo_verbose("Input loaded successfully", verbose)
    # Load user config and apply default metadata
    user_config = load_user_config()
    if user_config:
        echo_verbose("Applying user config metadata...", verbose)
        # Add standard metadata fields from config (if not already present)
        for key in ["author", "organization", "project", "location", "testbed"]:
            if key in user_config and key not in recording.metadata:
                recording._metadata[key] = user_config[key]
                echo_verbose(f" {key} = {user_config[key]} (from config)", verbose)
        # Add SigMF fields from config (if not already present)
        if "sigmf" in user_config:
            sigmf = user_config["sigmf"]
            for key in ["license", "hw", "dataset"]:
                if key in sigmf and key not in recording.metadata:
                    recording._metadata[key] = sigmf[key]
                    echo_verbose(f" {key} = {sigmf[key]} (from config)", verbose)
    # Apply metadata overrides from CLI (highest priority)
    if metadata:
        echo_verbose("Applying metadata overrides from CLI...", verbose)
        for meta_str in metadata:
            key, value = parse_metadata_override(meta_str)
            recording._metadata[key] = value
            echo_verbose(f" {key} = {value} (CLI override)", verbose)
    # Convert to output format
    echo_verbose(f"Writing {output_format.upper()} output...", verbose)
    # Split output into directory and filename for functions that need it
    output_dir = output_path.parent
    output_filename = output_path.name
    # If output_dir is empty (relative path with no dir), use current directory
    if str(output_dir) == ".":
        output_dir = None
    elif not output_dir.exists():
        # Create output directory if it doesn't exist
        output_dir.mkdir(parents=True, exist_ok=True)
    try:
        # Note: All to_* functions use (recording, filename, path) signature
        # We split the output path into directory and filename components
        if output_format == "sigmf":
            to_sigmf(recording, filename=output_filename, path=output_dir, overwrite=overwrite)
            echo_progress(
                (
                    f"Conversion complete: {output_path.with_suffix('.sigmf-data').name}, "
                    f"{output_path.with_suffix('.sigmf-meta').name}"
                ),
                quiet,
            )
        elif output_format == "npy":
            to_npy(recording, filename=output_filename, path=output_dir, overwrite=overwrite)
            echo_progress(f"Conversion complete: {output}", quiet)
        elif output_format == "wav":
            # Check for multichannel
            # NOTE(review): this ClickException is re-wrapped by the generic
            # handler below as "Failed to write output file" — confirm.
            if num_channels > 1:
                raise click.ClickException(
                    f"WAV export not supported for multichannel recordings\n"
                    f"Input has {num_channels} channels, WAV export requires single channel"
                )
            # Show decimation info if applicable
            original_sample_rate = recording.metadata.get("sample_rate", wav_sample_rate)
            if original_sample_rate > wav_sample_rate:
                decimation_factor = int(original_sample_rate / wav_sample_rate)
                new_sample_count = num_samples // decimation_factor
                echo_progress(f"Original sample rate: {original_sample_rate / 1e6:.1f} MHz", quiet)
                echo_progress(f"Target sample rate: {wav_sample_rate / 1e3:.1f} kHz", quiet)
                echo_progress(f"Decimation factor: {decimation_factor}", quiet)
                echo_progress(f"Output samples: {format_sample_count(new_sample_count)}", quiet)
                echo_verbose("Decimating...", verbose)
            to_wav(
                recording,
                filename=output_filename,
                path=output_dir,
                target_sample_rate=wav_sample_rate,
                bits_per_sample=int(wav_bits),
                overwrite=overwrite,
            )
            echo_progress(f"Conversion complete: {output}", quiet)
        elif output_format == "blue":
            # Convert blue format string to format expected by to_blue
            format_map = {"CI": "CI", "CF": "CF", "CD": "CD"}  # Complex int16 # Complex float32 # Complex float64
            blue_data_format = format_map[blue_format]
            echo_verbose(f"Using MIDAS Blue format: {blue_format} ({blue_data_format})", verbose)
            to_blue(
                recording, filename=output_filename, path=output_dir, data_format=blue_data_format, overwrite=overwrite
            )
            echo_progress(f"Conversion complete: {output}", quiet)
    except Exception as e:
        raise click.ClickException(f"Failed to write output file: {e}")
    # Show metadata preservation info in verbose mode
    if verbose and recording.metadata:
        echo_verbose("\nMetadata preserved:", verbose)
        for key, value in recording.metadata.items():
            echo_verbose(f" {key}: {value}", verbose)


# Allow running this module directly as a script during development.
if __name__ == "__main__":
    convert()

View File

@ -0,0 +1,518 @@
"""Device discovery utilities for SDR devices."""
import json
import re
import subprocess
from typing import Any, Dict, List, Tuple
import click
# Track loaded and failed drivers
_loaded_drivers = []
_failed_drivers = []
_failure_reasons = {}


def load_sdr_drivers(verbose: bool = False) -> Tuple[List[str], List[str], Dict[str, str]]:
    """
    Load available SDR drivers.

    Args:
        verbose: Show detailed error messages

    Returns:
        Tuple of (loaded_drivers, failed_drivers, failure_reasons)
    """
    global _loaded_drivers, _failed_drivers, _failure_reasons  # noqa: F824
    _loaded_drivers.clear()
    _failed_drivers.clear()
    _failure_reasons.clear()
    driver_modules = {
        "pluto": "ria_toolkit_oss.sdr.pluto",
        "hackrf": "ria_toolkit_oss.sdr.hackrf",
        "bladerf": "ria_toolkit_oss.sdr.blade",
        "usrp": "ria_toolkit_oss.sdr.usrp",
        "rtlsdr": "ria_toolkit_oss.sdr.rtlsdr",
        "thinkrf": "ria_toolkit_oss.sdr.thinkrf",
    }
    for driver_name, module_path in driver_modules.items():
        try:
            if verbose:
                __import__(module_path)
            else:
                # Quiet loading: suppress import-time warnings from drivers.
                import warnings

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    __import__(module_path)
            _loaded_drivers.append(driver_name)
        except ImportError as e:
            _failed_drivers.append(driver_name)
            error_msg = str(e)
            if "No module named" in error_msg:
                module_name = error_msg.split("'")[1] if "'" in error_msg else "unknown"
                _failure_reasons[driver_name] = f"ModuleNotFoundError: {module_name}"
            else:
                _failure_reasons[driver_name] = f"ImportError: {error_msg}"
        except Exception as e:
            # Drivers can fail at import time for reasons other than a
            # missing module (e.g. a broken native dependency).
            _failed_drivers.append(driver_name)
            _failure_reasons[driver_name] = f"{type(e).__name__}: {str(e)}"
    return _loaded_drivers, _failed_drivers, _failure_reasons
def find_hackrf_devices() -> List[Dict[str, Any]]:
    """Find HackRF devices by parsing `hackrf_info` output."""
    devices: List[Dict[str, Any]] = []
    try:
        output = subprocess.check_output(
            ["hackrf_info"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # Tool missing or no device attached: report nothing found.
        return devices
    current = {"type": "HackRF One"}
    for line in output.split("\n"):
        if "Index: " in line:
            # An "Index:" line starts the next device; flush the previous
            # one if it was fully parsed (i.e. has a serial number).
            if "serial" in current:
                devices.append(current)
            current = {"type": "HackRF One", "device_index": line.split(":")[1].strip()}
        if "Serial number:" in line:
            current["serial"] = line.split(":")[1].strip()
        elif "Board ID Number:" in line:
            current["board_id"] = line.split(":")[1].strip()
        elif "Firmware Version:" in line:
            current["firmware"] = line.split(":")[1].strip()
    if "serial" in current:
        devices.append(current)
    return devices
def find_bladerf_devices() -> List[Dict[str, Any]]:
    """Find BladeRF devices by parsing `bladeRF-cli -p` output."""
    devices: List[Dict[str, Any]] = []
    try:
        output = subprocess.check_output(
            ["bladeRF-cli", "-p"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5
        )
    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
        # Tool missing or probe failed: report nothing found.
        return devices
    # Collect every "key: value" line into a single device record.
    device = {"type": "BladeRF"}
    for raw_line in output.strip().split("\n"):
        stripped = raw_line.strip()
        if ":" in stripped:
            key, value = stripped.split(":", 1)
            device[key.strip()] = value.strip()
    if device:
        devices.append(device)
    return devices
def find_uhd_devices() -> List[Dict[str, Any]]:
    """Find USRP/UHD devices by parsing `uhd_find_devices` output."""
    devices: List[Dict[str, Any]] = []
    try:
        output = subprocess.check_output(
            ["uhd_find_devices"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=10
        )
    except (subprocess.CalledProcessError, FileNotFoundError, subprocess.TimeoutExpired):
        # Tool missing or probe failed: report nothing found.
        return devices
    if "-- UHD Device" not in output:
        return devices
    # Each device is printed as a "-- UHD Device N" block of key: value lines.
    for block in output.split("-- UHD Device")[1:]:
        device = {}
        for raw_line in block.strip().split("\n"):
            stripped = raw_line.strip()
            if ":" in stripped and not stripped.startswith("--"):
                key, value = stripped.split(":", 1)
                device[key.strip()] = value.strip()
        if device:
            devices.append(device)
    return devices
def find_rtlsdr_devices() -> List[Dict[str, Any]]:
    """Find RTL-SDR devices by parsing `rtl_test -t` output.

    Returns:
        List of device dicts with type, device_index and serial.

    Raises:
        ValueError: If the device count reported by rtl_test does not match
            the number of per-device lines that were parsed.
    """
    devices = []
    # Device count announced by rtl_test ("Found N device(s)"), if seen.
    # Previously this was tracked via a fragile `"count" in locals()` check.
    reported_count = None
    try:
        result = subprocess.check_output(
            ["rtl_test", "-t"], universal_newlines=True, stderr=subprocess.STDOUT, timeout=5
        )
        for line in result.split("\n"):
            if "Found" in line and "device" in line:
                match = re.search(r"Found (\d+) device", line)
                if match:
                    reported_count = int(match.group(1))
            elif "SN: " in line:
                device_match = re.search(r"(\d+): .*SN: (\w+)", line)
                if device_match:
                    devices.append(
                        {"type": "RTL-SDR", "device_index": device_match.group(1), "serial": device_match.group(2)}
                    )
        # Sanity check: the announced count should match what we parsed.
        if reported_count is not None and len(devices) != reported_count:
            raise ValueError("Number of stated devices does not match number of found devices")
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        pass
    return devices
def ping_ip(ip: str, timeout: int = 1) -> bool:
    """
    Ping an IP address to check if device is reachable.

    Args:
        ip: IP address to ping
        timeout: Timeout in seconds

    Returns:
        True if ping successful, False otherwise
    """
    try:
        subprocess.check_output(
            ["ping", "-c", "1", "-W", str(timeout), ip], stderr=subprocess.STDOUT, timeout=timeout + 1
        )
        return True
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # Bug fix: FileNotFoundError (no `ping` binary, e.g. in minimal
        # containers) previously escaped and crashed device discovery;
        # treat it as "not reachable" like the other probers do.
        return False
def find_pluto_network() -> List[Dict[str, Any]]:
    """Find PlutoSDR devices on the network by pinging common addresses."""
    # Factory-default Pluto hostnames/addresses, probed in order.
    candidates = ("pluto.local", "192.168.2.1", "192.168.3.1")
    return [
        {
            "type": "PlutoSDR",
            "uri": f"ip:{addr}",
            "description": "Network PlutoSDR",
        }
        for addr in candidates
        if ping_ip(addr, timeout=1)
    ]
def find_pluto_devices() -> List[Dict[str, Any]]:
    """Find PlutoSDR devices using pyadi-iio.

    Discovery strategy, in order:
      1. libiio context scan (preferred; yields serial/firmware details);
      2. network ping of common Pluto addresses when pyadi-iio is missing;
      3. USB bus scan via lsusb when nothing else was found.
    """
    devices = []
    try:
        import iio
        contexts = iio.scan_contexts()
        for uri, description in contexts.items():
            if "PlutoSDR" in description or "pluto" in uri.lower():
                try:
                    ctx = iio.Context(uri)
                    device_info = {
                        "type": "PlutoSDR",
                        "uri": uri,
                        "serial": ctx.attrs.get("hw_serial", "unknown"),
                        "firmware": ctx.attrs.get("fw_version", "unknown"),
                        "ip_addr": ctx.attrs.get("ip,ip-addr", "unknown"),
                        "model": ctx.attrs.get("hw_model", "unknown"),
                        "description": description,
                    }
                    # De-duplicate by serial: the same Pluto can appear under
                    # both a USB and an IP context.
                    unique = True
                    for existing_device in devices:
                        if existing_device["serial"] == device_info["serial"]:
                            unique = False
                    if unique:
                        devices.append(device_info)
                    # NOTE(review): _destroy() is a private libiio API — confirm
                    # it is still needed/available on libiio upgrades.
                    ctx._destroy()
                except Exception:
                    # A context that cannot be opened is simply skipped.
                    pass
    except ImportError:
        # Fallback to network ping discovery if pyadi-iio not available
        devices.extend(find_pluto_network())
    if not devices:
        # Last resort: look for a Pluto on the USB bus via lsusb.
        usb_devices = get_usb_devices()
        pluto_usb = [d for d in usb_devices if "PlutoSDR" in d.get("sdr_type", "")]
        for pluto in pluto_usb:
            pluto["type"] = "PlutoSDR"
            pluto["uri"] = "usb:" + pluto["bus"]
            devices.append(pluto)
    return devices
def find_thinkrf_devices() -> List[Dict[str, Any]]:
    """Find ThinkRF devices (placeholder for future implementation).

    Returns:
        Always an empty list until discovery is implemented.
    """
    # ThinkRF uses network-based discovery with proprietary SDK
    # TODO: Implement when pyrf is available and working
    return []
def get_usb_devices() -> List[Dict[str, Any]]:
    """Get USB devices using lsusb for SDR identification.

    Returns:
        One dict per recognised SDR with ``bus``, ``device``, ``usb_id``,
        ``description`` and ``sdr_type`` keys; empty when ``lsusb`` is
        unavailable, fails, or times out.
    """
    # Known vendor:product IDs for supported SDR hardware.
    known_ids = {
        "2cf0:5250": "BladeRF 2.0",
        "2cf0:5246": "BladeRF 1.0",
        "0bda:2838": "RTL-SDR",
        "0456:b673": "PlutoSDR (ADALM-PLUTO)",
        "2500:0020": "USRP B210",
        "2500:0021": "USRP B200",
        "1d50:604b": "HackRF One",
    }
    line_pattern = re.compile(r"Bus (\d+) Device (\d+): ID ([0-9a-f:]+) (.+)")

    found: List[Dict[str, Any]] = []
    try:
        listing = subprocess.check_output(["lsusb"], universal_newlines=True, timeout=5)
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        return found

    for line in listing.strip().split("\n"):
        for vid_pid, sdr_name in known_ids.items():
            if vid_pid not in line:
                continue
            match = line_pattern.match(line)
            if match:
                bus, device, usb_id, description = match.groups()
                found.append(
                    {
                        "bus": bus,
                        "device": device,
                        "usb_id": usb_id,
                        "description": description,
                        "sdr_type": sdr_name,
                    }
                )
    return found
def discover_all_devices(verbose: bool = False, json_output: bool = False) -> Dict[str, Any]:
    """Load SDR drivers and aggregate device discovery across all backends.

    BUGFIX: the original was annotated ``-> int`` while actually returning a
    dictionary; the annotation and docstring now match the implementation.

    Args:
        verbose: Also include per-driver failure reasons in the result.
        json_output: When True, omit the per-backend device lists that are
            only needed by the human-readable printer.

    Returns:
        A dictionary with ``loaded_drivers``, ``failed_drivers``, ``devices``
        and ``total_devices`` keys, plus the per-backend lists
        (``uhd_devices``, ``pluto_devices``, ``rtlsdr_devices``,
        ``bladerf_devices``, ``hackrf_devices``) unless ``json_output`` is set.
    """
    load_sdr_drivers(verbose=verbose)

    uhd_devices = find_uhd_devices()
    pluto_devices = find_pluto_devices()
    rtlsdr_devices = find_rtlsdr_devices()
    bladerf_devices = find_bladerf_devices()
    hackrf_devices = find_hackrf_devices()

    # Collect all device info
    all_devices = [
        *uhd_devices,
        *pluto_devices,
        *rtlsdr_devices,
        *bladerf_devices,
        *hackrf_devices,
    ]

    output: Dict[str, Any] = {
        "loaded_drivers": _loaded_drivers,
        "failed_drivers": _failed_drivers,
        "devices": all_devices,
        "total_devices": len(all_devices),
    }
    if verbose:
        output["failure_reasons"] = _failure_reasons
    if not json_output:
        # Keep the raw per-backend lists for print_all_devices().
        output["uhd_devices"] = uhd_devices
        output["pluto_devices"] = pluto_devices
        output["rtlsdr_devices"] = rtlsdr_devices
        output["bladerf_devices"] = bladerf_devices
        output["hackrf_devices"] = hackrf_devices
    return output
def print_all_devices(device_dict: dict, verbose: bool = False) -> int:  # noqa: C901
    """
    Print all SDR devices with signal-testbed style output.

    Args:
        device_dict: Dictionary containing all device info, as produced by
            discover_all_devices() with json_output=False (the per-backend
            keys "uhd_devices", "pluto_devices", etc. must be present)
        verbose: Show detailed error messages

    Returns:
        Total number of devices found
    """
    total_devices = 0

    # USRP/UHD Discovery - Try command-line tool even if driver failed to load
    uhd_devices = device_dict["uhd_devices"]
    if uhd_devices:
        click.echo(f"\n📡 USRP/UHD devices ({len(uhd_devices)}):")
        for device in uhd_devices:
            name = device.get("name", "Unknown")
            product = device.get("product", "Unknown")
            serial = device.get("serial", "Unknown")
            click.echo(f"{name} ({product}) - Serial: {serial}")
        total_devices += len(uhd_devices)
    else:
        if verbose:
            click.echo("\n📡 USRP/UHD devices: None found")

    # PlutoSDR Discovery - Try both pyadi-iio and USB detection
    pluto_devices = device_dict["pluto_devices"]
    pluto_count = len(pluto_devices)
    if pluto_count > 0:
        click.echo(f"\n📱 PlutoSDR devices ({pluto_count}):")
        for device in pluto_devices:
            # Determine if network or USB based on URI
            uri = device["uri"]
            if uri.startswith("ip:"):
                click.echo(f" ✅ Network: {uri.replace('ip:', '')}")
            elif uri.startswith("usb:"):
                click.echo(f" ✅ USB: {device['description']} (Bus {uri.replace('usb:', '').split('.')[0]})")
            else:
                click.echo(f"{uri}")
        total_devices += pluto_count
    else:
        if verbose:
            click.echo("\n📱 PlutoSDR devices: None found")

    # RTL-SDR Discovery
    # NOTE(review): unlike the UHD/Pluto sections above, the remaining
    # backends are only printed when their driver is in the module-level
    # _loaded_drivers list, so their devices are silently omitted when the
    # driver failed to load — confirm this asymmetry is intended.
    if "rtlsdr" in _loaded_drivers:
        rtl_devices = device_dict["rtlsdr_devices"]
        if rtl_devices:
            click.echo(f"\n📻 RTL-SDR devices ({len(rtl_devices)}):")
            for device in rtl_devices:
                idx = device.get("device_index", 0)
                click.echo(f" ✅ Device {idx}: {device.get('type', 'RTL-SDR')}")
            total_devices += len(rtl_devices)
        else:
            if verbose:
                click.echo("\n📻 RTL-SDR devices: None found")

    # BladeRF Discovery
    if "bladerf" in _loaded_drivers:
        bladerf_devices = device_dict["bladerf_devices"]
        if bladerf_devices:
            click.echo(f"\n⚡ BladeRF devices ({len(bladerf_devices)}):")
            for device in bladerf_devices:
                # BladeRF records use capitalized keys ("Description", "Serial").
                desc = device.get("Description", "BladeRF")
                serial = device.get("Serial", "Unknown")
                click.echo(f"{desc} - Serial: {serial}")
            total_devices += len(bladerf_devices)
        else:
            if verbose:
                click.echo("\n⚡ BladeRF devices: None found")

    # HackRF Discovery
    if "hackrf" in _loaded_drivers:
        hackrf_devices = device_dict["hackrf_devices"]
        if hackrf_devices:
            click.echo(f"\n🔧 HackRF devices ({len(hackrf_devices)}):")
            for device in hackrf_devices:
                serial = device.get("serial", "Unknown")
                board = device.get("board_id", "")
                firmware = device.get("firmware", "")
                # Only append the optional fields that are actually present.
                info = f"Serial: {serial}"
                if board:
                    info += f" - Board ID: {board}"
                if firmware:
                    info += f" - FW: {firmware}"
                click.echo(f"{device.get('type', 'HackRF')} - {info}")
            total_devices += len(hackrf_devices)
        else:
            if verbose:
                click.echo("\n🔧 HackRF devices: None found")

    # ThinkRF Discovery
    if "thinkrf" in _loaded_drivers:
        if verbose:
            click.echo("\n🌐 ThinkRF devices: Discovery not yet implemented")

    return total_devices
@click.command(help="Discover connected SDR devices")
@click.option("--verbose", "-v", is_flag=True, help="Show detailed information and errors")
@click.option("--json-output", is_flag=True, help="Output in JSON format")
def discover(verbose, json_output):
    """Discover connected SDR devices with driver loading."""
    # Driver loading happens inside discover_all_devices(); the module-level
    # _loaded_drivers / _failed_drivers / _failure_reasons read below are
    # populated by that call.
    device_dict = discover_all_devices(verbose=verbose, json_output=json_output)

    # JSON mode: Load drivers and return structured data
    if json_output:
        click.echo(json.dumps(device_dict, indent=2))
        return

    # Human-readable mode: Signal-testbed style
    # Print loaded drivers
    if _loaded_drivers:
        click.echo(f"\n✅ Loaded drivers ({len(_loaded_drivers)}):")
        for driver in _loaded_drivers:
            click.echo(f" {driver}")
    else:
        click.echo("\n❌ No drivers loaded successfully")

    # Print failed drivers
    if _failed_drivers:
        click.echo(f"\n❌ Failed drivers ({len(_failed_drivers)}):")
        for driver in _failed_drivers:
            # Failure reasons are only shown with --verbose.
            if verbose and driver in _failure_reasons:
                click.echo(f" {driver}: {_failure_reasons[driver]}")
            else:
                click.echo(f" {driver}")
    if not verbose and _failed_drivers:
        click.echo("\nRun with --verbose to see failure reasons")

    # Device discovery
    click.echo("\n" + "=" * 40)
    click.echo("Attached Devices")
    click.echo("=" * 40)
    total_devices = print_all_devices(device_dict=device_dict, verbose=verbose)

    # Summary
    click.echo("\n" + "=" * 40)
    click.echo("Discovery Summary")
    click.echo("=" * 40)
    click.echo(f"Loaded drivers: {len(_loaded_drivers)}")
    click.echo(f"Failed drivers: {len(_failed_drivers)}")
    click.echo(f"Detected devices: {total_devices}")

    if total_devices == 0:
        click.echo("\n💡 No devices detected - ensure they are connected and powered on")

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,318 @@
"""Init command - Initialize user configuration."""
import click
from .config import (
format_config_display,
get_config_path,
load_user_config,
save_user_config,
validate_config,
)
def prompt_with_default(text: str, default: str = "") -> str | None:
    """Prompt user with optional default value.

    Args:
        text: Prompt text shown to the user.
        default: Default value; when non-empty it is displayed and returned
            if the user just presses Enter.

    Returns:
        The user's input, or None when the answer is empty (field skipped).
        BUGFIX: the original was annotated ``-> str`` even though it returns
        None for skipped fields.
    """
    if default:
        result = click.prompt(text, default=default, show_default=True)
    else:
        # BUGFIX: a redundant `if result == "": return None` lived here in
        # the original; the final line already covers the empty case.
        result = click.prompt(text, default="", show_default=False)
    return result if result else None
def init_show(config_file_path, config_path):
    """Display the current configuration file, or guidance when it is absent."""
    if not config_file_path.exists():
        click.echo(f"No configuration file found at: {config_file_path}")
        click.echo("\nRun 'ria init' to create a configuration.")
        return
    try:
        config = load_user_config(config_path)
        report = [
            f"Current Configuration ({config_file_path}):",
            "=" * 60,
            "",
            format_config_display(config),
            "",
            "To update: ria init",
            "To reset: ria init --reset",
        ]
        for line in report:
            click.echo(line)
    except Exception as e:
        click.echo(f"Error reading configuration: {e}", err=True)
        click.echo("\nRun 'ria init --reset' to recreate the configuration.")
def init_reset(config_file_path, config_path, yes):
    """Delete the configuration file after displaying it and confirming."""
    if not config_file_path.exists():
        click.echo(f"No configuration file found at: {config_file_path}")
        return

    # Show what is about to be removed; a corrupt file still gets a notice.
    try:
        config = load_user_config(config_path)
        click.echo(f"This will delete your configuration file at: {config_file_path}")
        click.echo()
        click.echo("Current configuration:")
        for line in format_config_display(config).split("\n"):
            click.echo(f" {line}")
        click.echo()
    except Exception:
        click.echo(f"Configuration file exists but may be corrupted: {config_file_path}")
        click.echo()

    # Ask before destroying anything, unless --yes was given.
    if not yes and not click.confirm("Are you sure you want to reset?", default=False):
        click.echo("Reset cancelled.")
        return

    try:
        config_file_path.unlink()
    except Exception as e:
        click.echo(f"Error deleting configuration: {e}", err=True)
    else:
        click.echo("\n✓ Configuration deleted.")
        click.echo("\nRun 'ria init' to create a new configuration.")
def build_config(author, organization, project, location, testbed):
    """Assemble the top-level configuration dict, omitting empty fields.

    Args:
        author: Author name, or falsy to omit.
        organization: Organization name, or falsy to omit.
        project: Project identifier, or falsy to omit.
        location: Physical location, or falsy to omit.
        testbed: Testbed identifier, or falsy to omit.

    Returns:
        A dict containing only the keys whose values are truthy, in the
        order author, organization, project, location, testbed.
    """
    # Idiom: a filtered comprehension replaces five repeated if/append blocks.
    fields = {
        "author": author,
        "organization": organization,
        "project": project,
        "location": location,
        "testbed": testbed,
    }
    return {key: value for key, value in fields.items() if value}
def build_sigmf(license_id, hardware, dataset):
    """Assemble the SigMF metadata section, omitting empty fields.

    Args:
        license_id: SPDX license identifier, or falsy to omit.
        hardware: Hardware description, or falsy to omit.
        dataset: Dataset identifier, or falsy to omit.

    Returns:
        A dict with any of the SigMF keys ``license``, ``hw`` and
        ``dataset``; empty when all inputs are falsy.
    """
    # Idiom: a filtered comprehension replaces three repeated if/append blocks.
    fields = {"license": license_id, "hw": hardware, "dataset": dataset}
    return {key: value for key, value in fields.items() if value}
def save_config(config, config_path, use_interactive, warnings):
    """Write the configuration to disk and print follow-up guidance.

    Args:
        config: Configuration mapping to persist.
        config_path: Optional alternate config file location.
        use_interactive: Whether values came from interactive prompts.
        warnings: Validation warnings to surface in non-interactive mode.

    Returns:
        0 when the file was written, 1 on failure.
    """
    try:
        destination = save_user_config(config, config_path)
        click.echo(f"\n✓ Configuration saved to: {destination}")

        if use_interactive:
            follow_up = [
                "",
                "You can view your config anytime with: ria init --show",
                "You can update values by running: ria init",
            ]
        elif warnings:
            # Interactive mode already displayed warnings before saving.
            follow_up = ["", "Warnings:"] + [f" ⚠️ {warning}" for warning in warnings]
        else:
            follow_up = []
        for line in follow_up:
            click.echo(line)

        # TODO message for core team
        click.echo()
        click.echo("NOTE: Automatic config integration is not yet implemented.")
        click.echo("Config values must currently be applied manually with --metadata flags.")
        click.echo("(Core team TODO: wire config into capture/convert/transmit commands)")
        return 0
    except Exception as e:
        click.echo(f"\nError saving configuration: {e}", err=True)
        return 1
@click.command()
@click.option("--author", help="Author name (your name)")
@click.option("--organization", help="Organization/institution name")
@click.option("--project", help="Project name or identifier")
@click.option("--location", help="Physical location (lab name, site, etc.)")
@click.option("--testbed", help="Testbed identifier")
@click.option("--license", "license_id", help="Data license (SPDX identifier, default: Proprietary)")
@click.option("--hw", "hardware", help="Hardware description (e.g., PlutoSDR, USRP B210)")
@click.option("--dataset", help="Dataset identifier")
@click.option("--show", is_flag=True, help="Display current configuration and exit")
@click.option("--reset", is_flag=True, help="Delete existing config")
@click.option("--config-path", type=click.Path(), help="Use alternate config file location")
@click.option("--interactive/--no-interactive", default=None, help="Force interactive mode on/off")
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompts")
def init(
    author,
    organization,
    project,
    location,
    testbed,
    license_id,
    hardware,
    dataset,
    show,
    reset,
    config_path,
    interactive,
    yes,
):
    """Initialize user configuration.

    Creates a configuration file at ~/.ria/config.yaml with default metadata
    values that will be used across CLI commands.

    Examples:

    \b
    # Interactive setup
    ria init

    \b
    # Non-interactive setup
    ria init --author "Jane Doe" --project "RF_Analysis" --location "Lab_A"

    \b
    # Show current configuration
    ria init --show

    \b
    # Reset configuration
    ria init --reset
    """
    config_file_path = get_config_path(config_path)

    # Handle --show flag
    if show:
        init_show(config_file_path, config_path)
        return

    # Handle --reset flag
    if reset:
        init_reset(config_file_path, config_path, yes)
        return

    # Determine if we should use interactive mode
    # Interactive if: no CLI args provided OR --interactive flag OR config file doesn't exist
    has_cli_args = any([author, organization, project, location, testbed, hardware, dataset])
    if interactive is None:
        # Auto-detect: interactive if no args provided
        use_interactive = not has_cli_args
    else:
        use_interactive = interactive

    # Load existing config if it exists; its values become prompt defaults below.
    existing_config = None
    if config_file_path.exists():
        try:
            existing_config = load_user_config(config_path)
        except Exception as e:
            click.echo(f"Warning: Could not load existing config: {e}", err=True)
            click.echo("Creating new configuration...\n")

    # Interactive mode: overwrite the CLI-argument locals with prompted values.
    if use_interactive:
        click.echo()
        click.echo("Welcome to RIA Toolkit Oss SDR CLI Configuration!")
        click.echo("=" * 60)
        click.echo()
        click.echo(f"This will create a configuration file at: {config_file_path}")
        click.echo()
        click.echo("These values will be automatically added to recordings and conversions.")
        click.echo("You can always change these later by running 'ria init' again.")
        click.echo()
        click.echo("Press Enter to skip optional fields.")
        click.echo()

        # Required information
        click.echo("Required Information:")
        click.echo("-" * 20)

        # Use existing values as defaults
        author_default = existing_config.get("author", "") if existing_config else ""
        org_default = existing_config.get("organization", "") if existing_config else ""
        proj_default = existing_config.get("project", "") if existing_config else ""
        loc_default = existing_config.get("location", "") if existing_config else ""
        test_default = existing_config.get("testbed", "") if existing_config else ""

        author = click.prompt(
            "Author name (your name)", default=author_default or "", show_default=bool(author_default)
        )
        organization = prompt_with_default("Organization (optional)", org_default)
        project = prompt_with_default("Project name (optional)", proj_default)
        location = prompt_with_default("Location (optional)", loc_default)
        testbed = prompt_with_default("Testbed name (optional)", test_default)

        # SigMF metadata
        click.echo()
        click.echo("SigMF Metadata (optional):")
        click.echo("-" * 27)

        sigmf_defaults = existing_config.get("sigmf", {}) if existing_config else {}
        license_default = sigmf_defaults.get("license", "Proprietary")
        hw_default = sigmf_defaults.get("hw", "")
        dataset_default = sigmf_defaults.get("dataset", "")

        license_id = click.prompt(
            "License (e.g., Proprietary, CC-BY-4.0, MIT)", default=license_default, show_default=True
        )
        hardware = prompt_with_default("Hardware description (e.g., PlutoSDR)", hw_default)
        dataset = prompt_with_default("Dataset name (optional)", dataset_default)

    # Build configuration (falsy fields are dropped by the builders)
    config = build_config(author, organization, project, location, testbed)

    # SigMF section
    sigmf = build_sigmf(license_id, hardware, dataset)
    if sigmf:
        config["sigmf"] = sigmf

    # Validate configuration
    warnings = validate_config(config)

    # Show configuration summary
    if use_interactive:
        click.echo()
        click.echo("Configuration Summary:")
        click.echo("-" * 22)
        click.echo(format_config_display(config))
        click.echo()

        # Show warnings
        if warnings:
            click.echo("Warnings:")
            for warning in warnings:
                click.echo(f" ⚠️ {warning}")
            click.echo()

        # Confirm save
        if not yes:
            if not click.confirm("Save this configuration?", default=True):
                click.echo("Configuration not saved.")
                return

    # Save configuration (returns 0 on success, 1 on write failure)
    return save_config(config, config_path, use_interactive, warnings)


if __name__ == "__main__":
    init()

View File

@ -0,0 +1,421 @@
"""Split command - Split, trim, and extract portions of recordings."""
from pathlib import Path
import click
import numpy as np
from ria_toolkit_oss.io import from_npy_legacy, load_recording
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
detect_file_format,
echo_progress,
echo_verbose,
format_sample_count,
save_recording,
)
def get_output_extension(format_name):
    """Map a format name ('sigmf', 'npy', 'wav', 'blue') to its file suffix.

    Raises KeyError for unrecognised names; callers restrict the value via
    a click.Choice option.
    """
    return {"sigmf": ".sigmf", "npy": ".npy", "wav": ".wav", "blue": ".blue"}[format_name]
def validate_operation(split_at, split_every, split_duration, trim, extract_annotations):
    """Ensure exactly one of the mutually exclusive split operations was requested.

    Raises click.ClickException when zero or more than one operation is selected.
    """
    selected = [
        split_at is not None,
        split_every is not None,
        split_duration is not None,
        bool(trim),
        bool(extract_annotations),
    ]
    count = sum(selected)
    if count == 0:
        raise click.ClickException(
            "No operation specified. Use one of:\n"
            " --split-at SAMPLE\n"
            " --split-every N\n"
            " --split-duration SECONDS\n"
            " --trim (with --start and --length or --end)\n"
            " --extract-annotations"
        )
    if count > 1:
        raise click.ClickException(
            "Multiple operations specified. Use only one of:\n"
            " --split-at, --split-every, --split-duration, --trim, --extract-annotations"
        )
@click.command()
@click.argument("input", type=click.Path(exists=True))
@click.option("--split-at", type=int, metavar="SAMPLE", help="Split into two files at sample index")
@click.option("--split-every", type=int, metavar="N", help="Split into chunks of N samples")
@click.option(
    "--split-duration",
    type=float,
    metavar="SECONDS",
    help="Split into chunks of specified duration (requires sample_rate in metadata)",
)
@click.option("--trim", is_flag=True, help="Extract portion of recording (use with --start and --length or --end)")
@click.option(
    "--start", "start_sample", type=int, default=0, show_default=True, help="Start sample for trim operation"
)
@click.option("--length", "num_samples", type=int, help="Number of samples for trim operation")
@click.option("--end", "end_sample", type=int, help="End sample for trim operation (alternative to --length)")
@click.option("--extract-annotations", is_flag=True, help="Extract each annotated region to separate file")
@click.option("--annotation-label", type=str, help="Only extract annotations with this label")
@click.option("--annotation-index", type=int, help="Extract specific annotation by index")
@click.option("--output-dir", type=click.Path(), help="Output directory (default: current directory)")
@click.option("--output-prefix", type=str, help="Prefix for output filenames")
@click.option(
    "--output-format",
    type=click.Choice(["npy", "sigmf", "wav", "blue"]),
    help="Force output format (default: same as input)",
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing output files")
@click.option("--legacy", is_flag=True, help="Load input as legacy NPY format")
@click.option("--verbose", "-v", is_flag=True, help="Verbose output")
@click.option("--quiet", "-q", is_flag=True, help="Suppress output")
def split(  # noqa: C901
    input,
    split_at,
    split_every,
    split_duration,
    trim,
    start_sample,
    num_samples,
    end_sample,
    extract_annotations,
    annotation_label,
    annotation_index,
    output_dir,
    output_prefix,
    output_format,
    overwrite,
    legacy,
    verbose,
    quiet,
):
    """Split, trim, and extract portions of recordings.

    Split recordings into multiple files, extract portions, or extract annotated regions.

    \b
    Examples:
        # Split at specific sample
        ria split recording.sigmf --split-at 500000 --output-dir split_output

    \b
        # Split into equal chunks
        ria split capture.npy --split-every 100000 --output-dir chunks

    \b
        # Split by duration (requires sample_rate in metadata)
        ria split recording.sigmf --split-duration 1.0 --output-dir segments

    \b
        # Trim recording
        ria split signal.npy --trim --start 1000 --length 5000 --output-dir trimmed

    \b
        # Trim with end index
        ria split signal.npy --trim --start 1000 --end 6000 --output-dir trimmed

    \b
        # Extract all annotated regions
        ria split annotated.sigmf --extract-annotations --output-dir annotations

    \b
        # Extract specific annotation label
        ria split annotated.sigmf --extract-annotations --annotation-label "payload"

    \b
        # Extract specific annotation by index
        ria split annotated.sigmf --extract-annotations --annotation-index 1
    """
    # Validate operation selection (exactly one of the five operations)
    validate_operation(split_at, split_every, split_duration, trim, extract_annotations)

    # Validate trim parameters: exactly one of --length / --end is required
    if trim:
        if num_samples is None and end_sample is None:
            raise click.ClickException("Trim operation requires either --length or --end")
        if num_samples is not None and end_sample is not None:
            raise click.ClickException("Cannot specify both --length and --end")

    # Load input recording
    input_path = Path(input)
    input_format = detect_file_format(input_path)
    echo_progress(f"Loading: {input_path.name}", quiet)
    echo_verbose(f"Input format: {input_format.upper()}", verbose)
    try:
        if legacy:
            echo_verbose("Using legacy NPY loader", verbose)
            recording = from_npy_legacy(input)
        else:
            recording = load_recording(input)
    except Exception as e:
        raise click.ClickException(f"Failed to load input file: {e}")

    # Get recording info
    # NOTE(review): 2-D data is assumed to be (channels, samples) — samples
    # taken from axis 1. Confirm against the Recording data layout.
    if hasattr(recording.data, "shape") and len(recording.data.shape) == 2:
        total_samples = recording.data.shape[1]
    else:
        total_samples = len(recording.data)
    echo_progress(f"Total samples: {format_sample_count(total_samples)}", quiet)

    # Determine output format (default: same as input)
    if output_format is None:
        output_format = input_format
    echo_verbose(f"Output format: {output_format.upper()}", verbose)

    # Determine output directory
    if output_dir:
        out_dir = Path(output_dir)
    else:
        out_dir = Path(".")  # Current directory

    # Get base filename for outputs
    if output_prefix:
        base_name = output_prefix
    else:
        # Get input stem without format-specific suffixes
        base_name = input_path.stem
        if base_name.endswith(".sigmf-data") or base_name.endswith(".sigmf-meta"):
            base_name = base_name[:-11]
        elif base_name.endswith(".sigmf"):
            base_name = base_name[:-6]

    # Execute operation
    if split_at is not None:
        # Split at specific sample
        if split_at < 0 or split_at >= total_samples:
            raise click.ClickException(f"Invalid split point: {split_at}\n" f"Must be between 0 and {total_samples-1}")

        echo_progress(f"\nSplitting at sample {format_sample_count(split_at)}...", quiet)

        # Create two parts: [0, split_at) and [split_at, total_samples)
        part1 = recording.trim(start_sample=0, num_samples=split_at)
        part2 = recording.trim(start_sample=split_at, num_samples=total_samples - split_at)

        # Add metadata about original file so provenance survives the split
        part1._metadata["original_file"] = str(input_path.name)
        part1._metadata["original_start_sample"] = 0
        part1._metadata["original_end_sample"] = split_at
        part1._metadata["split_operation"] = "split_at"

        part2._metadata["original_file"] = str(input_path.name)
        part2._metadata["original_start_sample"] = split_at
        part2._metadata["original_end_sample"] = total_samples
        part2._metadata["split_operation"] = "split_at"

        # Save parts
        ext = get_output_extension(output_format)
        output1 = out_dir / f"{base_name}_part1{ext}"
        output2 = out_dir / f"{base_name}_part2{ext}"

        echo_progress(
            f" Part 1: samples 0-{format_sample_count(split_at-1)} ({format_sample_count(split_at)} samples)", quiet
        )
        save_recording(part1, output1, output_format, overwrite, verbose)

        echo_progress(
            message=(
                f" Part 2: samples {format_sample_count(split_at)}-{format_sample_count(total_samples-1)} "
                f"({format_sample_count(total_samples - split_at)} samples)"
            ),
            quiet=quiet,
        )
        save_recording(part2, output2, output_format, overwrite, verbose)

        echo_progress("\nSaved:", quiet)
        echo_progress(f" {output1}", quiet)
        echo_progress(f" {output2}", quiet)

    elif split_every is not None or split_duration is not None:
        # Split into equal chunks
        if split_duration is not None:
            # Convert duration to samples
            sample_rate = recording.metadata.get("sample_rate")
            if not sample_rate:
                raise click.ClickException(
                    "Cannot split by duration: no sample_rate in metadata\n"
                    "Use --split-every with sample count instead"
                )
            split_samples = int(split_duration * sample_rate)
            echo_progress(
                f"\nSplitting into {split_duration}s chunks ({format_sample_count(split_samples)} samples)...", quiet
            )
        else:
            split_samples = split_every
            echo_progress(f"\nSplitting into chunks of {format_sample_count(split_samples)} samples...", quiet)

        if split_samples <= 0:
            raise click.ClickException(f"Invalid chunk size: {split_samples}")

        # Calculate number of chunks (ceil: the last chunk may be partial)
        num_chunks = int(np.ceil(total_samples / split_samples))
        echo_progress(f"Creating {num_chunks} chunks...", quiet)

        # Create chunks
        ext = get_output_extension(output_format)
        created_files = []
        for i in range(num_chunks):
            start = i * split_samples
            # The final chunk may be shorter than split_samples.
            length = min(split_samples, total_samples - start)
            end = start + length - 1

            # Trim chunk
            chunk = recording.trim(start_sample=start, num_samples=length)

            # Add metadata
            chunk._metadata["original_file"] = str(input_path.name)
            chunk._metadata["original_start_sample"] = start
            chunk._metadata["original_end_sample"] = start + length
            chunk._metadata["split_operation"] = "split_every"
            chunk._metadata["chunk_index"] = i + 1
            chunk._metadata["total_chunks"] = num_chunks

            # Generate output filename; zero-pad so names sort lexically
            chunk_num = str(i + 1).zfill(len(str(num_chunks)))
            output_path = out_dir / f"{base_name}_chunk{chunk_num}{ext}"

            echo_progress(
                f" Chunk {i+1}/{num_chunks}: samples {format_sample_count(start)}-{format_sample_count(end)}...",
                quiet,
            )
            save_recording(chunk, output_path, output_format, overwrite, verbose)
            created_files.append(output_path)

        echo_progress(f"\nCreated {num_chunks} chunks in {out_dir}/", quiet)

    elif trim:
        # Trim operation: resolve --end into a sample count first
        if end_sample is not None:
            if end_sample <= start_sample:
                raise click.ClickException(
                    f"Invalid range: end ({end_sample}) must be greater than start ({start_sample})"
                )
            num_samples = end_sample - start_sample

        if start_sample < 0 or num_samples < 0:
            raise click.ClickException("Invalid trim range: start and length must be non-negative")

        if start_sample + num_samples > total_samples:
            raise click.ClickException(
                f"Invalid trim range\n"
                f"Start: {format_sample_count(start_sample)}, Length: {format_sample_count(num_samples)}, "
                f"End: {format_sample_count(start_sample + num_samples)}\n"
                f"Recording only has {format_sample_count(total_samples)} samples "
                f"(indices 0-{format_sample_count(total_samples-1)})"
            )

        echo_progress("\nTrimming recording...", quiet)
        echo_progress(f" Start: {format_sample_count(start_sample)}", quiet)
        echo_progress(f" Length: {format_sample_count(num_samples)} samples", quiet)
        echo_progress(f" End: {format_sample_count(start_sample + num_samples - 1)}", quiet)

        # Trim recording
        trimmed = recording.trim(start_sample=start_sample, num_samples=num_samples)

        # Add metadata
        trimmed._metadata["original_file"] = str(input_path.name)
        trimmed._metadata["original_start_sample"] = start_sample
        trimmed._metadata["original_end_sample"] = start_sample + num_samples
        trimmed._metadata["split_operation"] = "trim"

        # Save trimmed recording
        ext = get_output_extension(output_format)
        output_path = out_dir / f"{base_name}{ext}"
        save_recording(trimmed, output_path, output_format, overwrite, verbose)

        echo_progress(f"\nOutput: {output_path}", quiet)
        echo_progress("Done.", quiet)

    elif extract_annotations:
        # Extract annotated regions
        if not recording.annotations:
            raise click.ClickException(
                "No annotations found in recording\n" "Use 'ria annotate' to add annotations first"
            )

        # Filter annotations: --annotation-index first, then --annotation-label
        annotations_to_extract = recording.annotations

        if annotation_index is not None:
            if annotation_index < 0 or annotation_index >= len(annotations_to_extract):
                raise click.ClickException(
                    f"Invalid annotation index: {annotation_index}\n"
                    f"Recording has {len(annotations_to_extract)} annotations "
                    f"(indices 0-{len(annotations_to_extract)-1})"
                )
            annotations_to_extract = [annotations_to_extract[annotation_index]]

        if annotation_label is not None:
            filtered = [ann for ann in annotations_to_extract if ann.label == annotation_label]
            if not filtered:
                available_labels = list(set(ann.label for ann in recording.annotations))
                raise click.ClickException(
                    f"No annotations with label '{annotation_label}'\n"
                    f"Available labels: {', '.join(available_labels)}"
                )
            annotations_to_extract = filtered

        echo_progress(f"\nExtracting {len(annotations_to_extract)} annotated region(s)...", quiet)

        # Extract each annotation
        ext = get_output_extension(output_format)
        created_files = []
        for ann in annotations_to_extract:
            # Get annotation bounds
            start = ann.sample_start
            count = ann.sample_count
            end = start + count - 1

            # Trim to annotation bounds
            chunk = recording.trim(start_sample=start, num_samples=count)

            # Clear annotations - the trimmed chunk IS the annotation,
            # and trim() may produce invalid annotations
            chunk._annotations = []

            # Add metadata
            chunk._metadata["original_file"] = str(input_path.name)
            chunk._metadata["original_start_sample"] = start
            chunk._metadata["original_end_sample"] = start + count
            chunk._metadata["split_operation"] = "extract_annotation"
            chunk._metadata["annotation_label"] = ann.label

            # Generate filename; sanitize the label for filesystem use
            label_safe = ann.label.replace(" ", "_").replace("/", "_")
            output_filename = f"{base_name}_{label_safe}_{start}-{start+count}{ext}"
            output_path = out_dir / output_filename

            # Get original index in full annotation list if we filtered
            if annotation_index is not None:
                display_idx = annotation_index
            else:
                display_idx = recording.annotations.index(ann)

            echo_progress(
                message=(
                    f" [{display_idx}] {ann.label} ({format_sample_count(start)}"
                    f"-{format_sample_count(end)}): {output_filename}"
                ),
                quiet=quiet,
            )
            save_recording(chunk, output_path, output_format, overwrite, verbose)
            created_files.append(output_path)

        echo_progress(f"\nExtracted {len(annotations_to_extract)} annotated region(s).", quiet)


if __name__ == "__main__":
    split()

Some files were not shown because too many files have changed in this diff Show More