From 8836bb6f3c9cf09770d69e63b61219ff9b6996e7 Mon Sep 17 00:00:00 2001 From: Andriy Kogan Date: Sat, 18 Jan 2025 07:38:39 +0100 Subject: [PATCH 1/5] Lens integration --- .env.dev | 6 +- Pipfile | 3 + Pipfile.lock | 370 +++++++++++++++++++++++++++++- docs/agent/lens_protocol.md | 172 ++++++++++++++ src/core/config.py | 4 + src/tools/lens_protocol.py | 191 +++++++++++++++ tests/tools/test_lens_protocol.py | 190 +++++++++++++++ 7 files changed, 931 insertions(+), 5 deletions(-) create mode 100644 docs/agent/lens_protocol.md create mode 100644 src/tools/lens_protocol.py create mode 100644 tests/tools/test_lens_protocol.py diff --git a/.env.dev b/.env.dev index 68feb02..5c90858 100644 --- a/.env.dev +++ b/.env.dev @@ -79,4 +79,8 @@ SLACK_APP_TOKEN= # Spotify settings SPOTIFY_CLIENT_ID= SPOTIFY_CLIENT_SECRET= -SPOTIFY_REDIRECT_URI= \ No newline at end of file +SPOTIFY_REDIRECT_URI= + +# Lens protocol settings +LENS_API_KEY= +LENS_PROFILE_ID= \ No newline at end of file diff --git a/Pipfile b/Pipfile index 3c1decf..b1c9895 100644 --- a/Pipfile +++ b/Pipfile @@ -19,6 +19,9 @@ torch = "*" transformers = "*" discord = "*" slack-sdk = "*" +lenspy = "*" +gql = "*" +requests-toolbelt = "*" google-auth-oauthlib = "*" google-auth = "*" google-api-python-client = "*" diff --git a/Pipfile.lock b/Pipfile.lock index c1d0010..349cdc7 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "00bbf2a57a98475dfe4b9f60826ef11f88af14870b2f4c979554cf75cc530645" + "sha256": "d3d35671a55a4e80d2393509f820c5d4f1f7f3c4c1b03d2e56b3c897af4b5e8c" }, "pipfile-spec": 6, "requires": { @@ -218,6 +218,14 @@ "markers": "python_full_version >= '3.6.0'", "version": "==4.12.3" }, + "blinker": { + "hashes": [ + "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", + "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc" + ], + "markers": "python_version >= '3.9'", + "version": "==1.9.0" + }, "bracex": { "hashes": [ 
"sha256:12c50952415bfa773d2d9ccb8e79651b8cdb1f31a42f6091b804f6ba2b4a66b6", @@ -430,6 +438,35 @@ "markers": "python_version >= '3.7'", "version": "==1.2.0" }, + "dash": { + "hashes": [ + "sha256:0ce0479d1bc958e934630e2de7023b8a4558f23ce1f9f5a4b34b65eb3903a869", + "sha256:20e8404f73d0fe88ce2eae33c25bbc513cbe52f30d23a401fa5f24dbb44296c8" + ], + "markers": "python_version >= '3.8'", + "version": "==2.18.2" + }, + "dash-core-components": { + "hashes": [ + "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346", + "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee" + ], + "version": "==2.0.0" + }, + "dash-html-components": { + "hashes": [ + "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50", + "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63" + ], + "version": "==2.0.0" + }, + "dash-table": { + "hashes": [ + "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308", + "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9" + ], + "version": "==5.0.0" + }, "deprecated": { "hashes": [ "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320", @@ -493,6 +530,14 @@ "markers": "python_version >= '3.8'", "version": "==3.16.1" }, + "flask": { + "hashes": [ + "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3", + "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842" + ], + "markers": "python_version >= '3.8'", + "version": "==3.0.3" + }, "flatbuffers": { "hashes": [ "sha256:2910b0bc6ae9b6db78dd2b18d0b7a0709ba240fb5585f286a3a2b30785c22dac", @@ -663,6 +708,22 @@ "markers": "python_version >= '3.7'", "version": "==1.66.0" }, + "gql": { + "hashes": [ + "sha256:70dda5694a5b194a8441f077aa5fb70cc94e4ec08016117523f013680901ecb7", + "sha256:ccb9c5db543682b28f577069950488218ed65d4ac70bb03b6929aaadaf636de9" + ], + "index": "pypi", + "version": "==3.5.0" + }, + "graphql-core": { + "hashes": [ + 
"sha256:2f150d5096448aa4f8ab26268567bbfeef823769893b39c1a2e1409590939c8a", + "sha256:e671b90ed653c808715645e3998b7ab67d382d55467b7e2978549111bbabf8d5" + ], + "markers": "python_version >= '3.6' and python_version < '4'", + "version": "==3.2.5" + }, "grpcio": { "hashes": [ "sha256:01f834732c22a130bdf3dc154d1053bdbc887eb3ccb7f3e6285cfbfc33d9d5cc", @@ -948,6 +1009,14 @@ "markers": "python_version >= '3.7'", "version": "==2.0.0" }, + "itsdangerous": { + "hashes": [ + "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", + "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.0" + }, "jinja2": { "hashes": [ "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", @@ -1046,6 +1115,15 @@ "markers": "python_version >= '3.6'", "version": "==31.0.0" }, + "lenspy": { + "hashes": [ + "sha256:1ddeb9ad9431a77e8169780deda13f86c74dfcc757dbaa862a6151307f542c1d", + "sha256:6fbd19963b64d2556ba3d78cf245f2681f711788409ba5deca868b5a80614876" + ], + "index": "pypi", + "markers": "python_full_version >= '3.6.0'", + "version": "==1.1.0" + }, "loguru": { "hashes": [ "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", @@ -1588,6 +1666,22 @@ "markers": "python_version >= '3.7'", "version": "==8.4.0" }, + "nest-asyncio": { + "hashes": [ + "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", + "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c" + ], + "markers": "python_version >= '3.5'", + "version": "==1.6.0" + }, + "networkx": { + "hashes": [ + "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", + "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f" + ], + "markers": "python_version >= '3.10'", + "version": "==3.4.2" + }, "numpy": { "hashes": [ "sha256:059e6a747ae84fce488c3ee397cee7e5f905fd1bda5fb18c66bc41807ff119b2", @@ -1650,6 +1744,111 @@ "markers": 
"python_version >= '3.10'", "version": "==2.2.1" }, + "nvidia-cublas-cu12": { + "hashes": [ + "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", + "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", + "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc" + ], + "markers": "python_version >= '3'", + "version": "==12.4.5.8" + }, + "nvidia-cuda-cupti-cu12": { + "hashes": [ + "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922", + "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", + "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb" + ], + "markers": "python_version >= '3'", + "version": "==12.4.127" + }, + "nvidia-cuda-nvrtc-cu12": { + "hashes": [ + "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", + "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", + "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec" + ], + "markers": "python_version >= '3'", + "version": "==12.4.127" + }, + "nvidia-cuda-runtime-cu12": { + "hashes": [ + "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e", + "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", + "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3" + ], + "markers": "python_version >= '3'", + "version": "==12.4.127" + }, + "nvidia-cudnn-cu12": { + "hashes": [ + "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", + "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a" + ], + "markers": "python_version >= '3'", + "version": "==9.1.0.70" + }, + "nvidia-cufft-cu12": { + "hashes": [ + "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", + "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b", + "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9" + ], + "markers": 
"python_version >= '3'", + "version": "==11.2.1.3" + }, + "nvidia-curand-cu12": { + "hashes": [ + "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", + "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", + "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771" + ], + "markers": "python_version >= '3'", + "version": "==10.3.5.147" + }, + "nvidia-cusolver-cu12": { + "hashes": [ + "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", + "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", + "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c" + ], + "markers": "python_version >= '3'", + "version": "==11.6.1.9" + }, + "nvidia-cusparse-cu12": { + "hashes": [ + "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f", + "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", + "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1" + ], + "markers": "python_version >= '3'", + "version": "==12.3.1.170" + }, + "nvidia-nccl-cu12": { + "hashes": [ + "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0" + ], + "markers": "python_version >= '3'", + "version": "==2.21.5" + }, + "nvidia-nvjitlink-cu12": { + "hashes": [ + "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", + "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", + "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1" + ], + "markers": "python_version >= '3'", + "version": "==12.4.127" + }, + "nvidia-nvtx-cu12": { + "hashes": [ + "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485", + "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", + "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3" + ], + "markers": "python_version >= '3'", + "version": "==12.4.127" + }, "oauthlib": { "hashes": [ 
"sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", @@ -1877,6 +2076,54 @@ ], "version": "==0.5.7" }, + "pandas": { + "hashes": [ + "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", + "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", + "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5", + "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", + "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", + "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32", + "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea", + "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", + "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f", + "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348", + "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", + "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", + "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5", + "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e", + "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", + "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645", + "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", + "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30", + "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", + "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", + "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", + "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3", + "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039", + 
"sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", + "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd", + "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761", + "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", + "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57", + "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c", + "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c", + "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", + "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", + "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", + "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42", + "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", + "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39", + "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc", + "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698", + "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed", + "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", + "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", + "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319" + ], + "markers": "python_version >= '3.9'", + "version": "==2.2.3" + }, "parse": { "hashes": [ "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558", @@ -1978,6 +2225,14 @@ "markers": "python_version >= '3.8'", "version": "==4.3.6" }, + "plotly": { + "hashes": [ + "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae", + "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089" + ], + "markers": "python_version >= '3.8'", + "version": "==5.24.1" + }, "pluggy": { "hashes": [ 
"sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", @@ -2361,6 +2616,13 @@ "markers": "python_version >= '3.9'", "version": "==21.10" }, + "pytz": { + "hashes": [ + "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", + "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725" + ], + "version": "==2024.2" + }, "pyyaml": { "hashes": [ "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", @@ -2660,6 +2922,22 @@ "markers": "python_version >= '3.4'", "version": "==2.0.0" }, + "requests-toolbelt": { + "hashes": [ + "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", + "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06" + ], + "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.0.0" + }, + "retrying": { + "hashes": [ + "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e", + "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35" + ], + "version": "==1.3.4" + }, "rich": { "hashes": [ "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", @@ -2701,6 +2979,27 @@ "markers": "python_version >= '3.7'", "version": "==0.9.2" }, + "safetensors": { + "hashes": [ + "sha256:03c937100f38c9ff4c1507abea9928a6a9b02c9c1c9c3609ed4fb2bf413d4975", + "sha256:1506e4c2eda1431099cebe9abf6c76853e95d0b7a95addceaa74c6019c65d8cf", + "sha256:3ab696dfdc060caffb61dbe4066b86419107a24c804a4e373ba59be699ebd8d5", + "sha256:3dfa7c2f3fe55db34eba90c29df94bcdac4821043fc391cb5d082d9922013869", + "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2", + "sha256:46ff2116150ae70a4e9c490d2ab6b6e1b1b93f25e520e540abe1b81b48560c3a", + "sha256:5c5b5d9da594f638a259fca766046f44c97244cc7ab8bef161b3e80d04becc76", + "sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae", + 
"sha256:78abdddd03a406646107f973c7843276e7b64e5e32623529dc17f3d94a20f589", + "sha256:86016d40bcaa3bcc9a56cd74d97e654b5f4f4abe42b038c71e4f00a089c4526c", + "sha256:990833f70a5f9c7d3fc82c94507f03179930ff7d00941c287f73b6fcbf67f19e", + "sha256:a00e737948791b94dad83cf0eafc09a02c4d8c2171a239e8c8572fe04e25960e", + "sha256:cb4a8d98ba12fa016f4241932b1fc5e702e5143f5374bba0bbcf7ddc1c4cf2b8", + "sha256:d3a06fae62418ec8e5c635b61a8086032c9e281f16c63c3af46a6efbab33156f", + "sha256:fe55c039d97090d1f85277d402954dd6ad27f63034fa81985a9cc59655ac3ee2" + ], + "markers": "python_version >= '3.7'", + "version": "==0.5.2" + }, "setuptools": { "hashes": [ "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", @@ -2776,11 +3075,11 @@ }, "sympy": { "hashes": [ - "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73", - "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9" + "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", + "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8" ], "markers": "python_version >= '3.8'", - "version": "==1.13.3" + "version": "==1.13.1" }, "tavily-python": { "hashes": [ @@ -2874,6 +3173,30 @@ "markers": "python_version >= '3.7'", "version": "==0.21.0" }, + "torch": { + "hashes": [ + "sha256:1f3b7fb3cf7ab97fae52161423f81be8c6b8afac8d9760823fd623994581e1a3", + "sha256:23d062bf70776a3d04dbe74db950db2a5245e1ba4f27208a87f0d743b0d06e86", + "sha256:31f8c39660962f9ae4eeec995e3049b5492eb7360dd4f07377658ef4d728fa4c", + "sha256:32a037bd98a241df6c93e4c789b683335da76a2ac142c0973675b715102dc5fa", + "sha256:340ce0432cad0d37f5a31be666896e16788f1adf8ad7be481196b503dad675b9", + "sha256:34bfa1a852e5714cbfa17f27c49d8ce35e1b7af5608c4bc6e81392c352dbc601", + "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", + "sha256:46c817d3ea33696ad3b9df5e774dba2257e9a4cd3c4a3afbf92f6bb13ac5ce2d", + "sha256:603c52d2fe06433c18b747d25f5c333f9c1d58615620578c326d66f258686f9a", + 
"sha256:71328e1bbe39d213b8721678f9dcac30dfc452a46d586f1d514a6aa0a99d4744", + "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", + "sha256:7974e3dce28b5a21fb554b73e1bc9072c25dde873fa00d54280861e7a009d7dc", + "sha256:8046768b7f6d35b85d101b4b38cba8aa2f3cd51952bc4c06a49580f2ce682291", + "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", + "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", + "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", + "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==2.5.1" + }, "tqdm": { "hashes": [ "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", @@ -2882,6 +3205,26 @@ "markers": "python_version >= '3.7'", "version": "==4.67.1" }, + "transformers": { + "hashes": [ + "sha256:03fdfcbfb8b0367fb6c9fbe9d1c9aa54dfd847618be9b52400b2811d22799cb1", + "sha256:6d3de6d71cb5f2a10f9775ccc17abce9620195caaf32ec96542bd2a6937f25b0" + ], + "index": "pypi", + "markers": "python_full_version >= '3.9.0'", + "version": "==4.48.0" + }, + "triton": { + "hashes": [ + "sha256:0f34f6e7885d1bf0eaaf7ba875a5f0ce6f3c13ba98f9503651c1e6dc6757ed5c", + "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", + "sha256:6dadaca7fc24de34e180271b5cf864c16755702e9f63a16f62df714a8099126a", + "sha256:aafa9a20cd0d9fee523cd4504aa7131807a864cd77dcf6efe7e981f18b8c6c11", + "sha256:c8182f42fd8080a7d39d666814fa36c5e30cc00ea7eeeb1a2983dbb4c99a0fdc" + ], + "markers": "platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'", + "version": "==3.1.0" + }, "tweepy": { "hashes": [ "sha256:1345cbcdf0a75e2d89f424c559fd49fda4d8cd7be25cd5131e3b57bad8a21d76", @@ -2916,6 +3259,14 @@ "markers": "python_version >= '3.8'", "version": "==4.12.2" }, + "tzdata": { + "hashes": [ + 
"sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc", + "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd" + ], + "markers": "python_version >= '2'", + "version": "==2024.2" + }, "uritemplate": { "hashes": [ "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0", @@ -3198,6 +3549,14 @@ "markers": "python_version >= '3.9'", "version": "==14.1" }, + "werkzeug": { + "hashes": [ + "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17", + "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d" + ], + "markers": "python_version >= '3.8'", + "version": "==3.0.6" + }, "whatsapp-api-client-python": { "hashes": [ "sha256:c87c3b3dc8cb27ee74d371288af9bd59c744c457f70fc4371f6bb93051dffa76" @@ -3816,6 +4175,7 @@ "sha256:f9b57eaa3b0cd8db52049ed0330747b0364e899e8a606a624813452b8203d5f7", "sha256:fce4f615f8ca31b2e61aa0eb5865a21e14f5629515c9151850aa936c02a1ee51" ], + "index": "pypi", "markers": "python_version >= '3.10'", "version": "==2.2.1" }, @@ -4023,6 +4383,7 @@ "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], + "index": "pypi", "markers": "python_version >= '3.8'", "version": "==6.0.2" }, @@ -4185,6 +4546,7 @@ "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3" ], + "index": "pypi", "markers": "python_version >= '3.9'", "version": "==75.8.0" }, diff --git a/docs/agent/lens_protocol.md b/docs/agent/lens_protocol.md new file mode 100644 index 0000000..3e4c69f --- /dev/null +++ b/docs/agent/lens_protocol.md @@ -0,0 +1,172 @@ +# Llama Integration + +The agent supports local inference using Meta's Llama models (8B, 70B, and 405B parameters) for scenarios requiring on-premise or self-hosted language models. + +## Setup + +1. 
Download Llama Model + - Obtain access to Meta's Llama models through [Meta's AI website](https://ai.meta.com/llama/) + - Download your preferred model size: + - Llama-8B (16GB minimum RAM) + - Llama-70B (140GB minimum RAM) + - Llama-405B (780GB minimum RAM) + +2. Install Dependencies + ```bash + pip install torch>=2.2.0 transformers>=4.38.0 + ``` + +3. Configure Environment Variables + Add these to your `.env` file: + ```bash + LLAMA_MODEL_PATH=/path/to/your/llama/model + LLM_PROVIDER=llama + ``` + +### Basic Setup +```python +from src.llm.llm import LLM + +# Initialize LLM with Llama backend +llm = LLM() # Will use Llama if LLM_PROVIDER=llama + +# Generate a response +response = await llm.generate_response([ + {"role": "user", "content": "Explain quantum computing"} +]) +``` + +## Model Selection + +Choose the appropriate model size based on your requirements: + +### Llama-8B +- Minimum Requirements: + - 16GB RAM + - 20GB disk space +- Best for: + - Basic text generation + - Simple chat interactions + - Resource-constrained environments + +### Llama-70B +- Minimum Requirements: + - 140GB RAM + - 140GB disk space +- Best for: + - Complex reasoning + - Code generation + - Advanced chat applications + +### Llama-405B +- Minimum Requirements: + - 780GB RAM + - 800GB disk space +- Best for: + - Research applications + - Maximum model capability + - Enterprise-scale deployments + +## Advanced Configuration + +### Memory Management +The agent automatically manages model loading based on available resources: +```python +# Override default device mapping +response = await llm.generate_response( + messages=[{"role": "user", "content": "Hello"}], + device_map="cpu" # Force CPU inference +) + +# Control memory usage +response = await llm.generate_response( + messages=[{"role": "user", "content": "Hello"}], + max_tokens=512, # Limit response length + torch_dtype="auto" # Automatic precision selection +) +``` + +### Generation Parameters +Customize response generation: 
+```python +# Creative writing +response = await llm.generate_response( + messages=[{"role": "user", "content": "Write a story"}], + temperature=0.9, # More creative + top_p=0.95 +) + +# Factual responses +response = await llm.generate_response( + messages=[{"role": "user", "content": "Explain TCP/IP"}], + temperature=0.2, # More focused + top_p=0.1 +) +``` + +## Example Use Cases + +### Chat Application +```python +messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the capital of France?"} +] +response = await llm.generate_response(messages) +``` + +### Code Generation +```python +messages = [ + {"role": "system", "content": "You are a Python expert."}, + {"role": "user", "content": "Write a function to calculate Fibonacci numbers."} +] +response = await llm.generate_response( + messages, + temperature=0.2 # Lower temperature for code generation +) +``` + +## Best Practices + +1. **Resource Management** + - Monitor system memory usage + - Use appropriate model size for your hardware + - Consider CPU fallback for large models + +2. **Performance Optimization** + - Use GPU acceleration when available + - Adjust batch sizes based on memory constraints + - Cache frequently used responses + +3. **Error Handling** + - Implement proper validation of model files + - Handle out-of-memory scenarios gracefully + - Monitor model performance and errors + +## Troubleshooting + +Common issues and solutions: + +1. **Out of Memory Errors** + - Switch to a smaller model + - Use CPU fallback + - Reduce batch size or context length + +2. **Missing Model Files** + - Verify model path in environment variables + - Check file permissions + - Ensure all required model files are present + +3. 
"""Tool for interacting with the Lens Protocol v2 GraphQL API."""

import logging
from typing import Dict, List, Optional

from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport

from src.core.config import settings


class LensProtocolTool:
    """Tool for interacting with the Lens Protocol network.

    The GraphQL client is created eagerly in ``__init__``; if the API key is
    missing or the transport cannot be built, ``self.client`` stays ``None``
    and every public method degrades to a logged no-op result.
    """

    def __init__(self):
        # None until _initialize_connection succeeds; all public methods
        # guard on this.
        self.client: Optional[Client] = None
        self.logger = logging.getLogger(__name__)
        # Default profile used when callers do not pass an explicit one.
        self.profile_id = settings.LENS_PROFILE_ID
        self._initialize_connection()

    def _initialize_connection(self) -> bool:
        """
        Initialize the connection to Lens Protocol using settings.

        Returns:
            bool: True if the client was created, False otherwise.
        """
        # Guard first: a missing key is a configuration problem, not an
        # exceptional transport failure, so keep it out of the try block.
        if not settings.LENS_API_KEY:
            self.logger.error("Lens API key not configured in settings")
            return False

        try:
            transport = RequestsHTTPTransport(
                url="https://api-v2.lens.dev", headers={"x-api-key": settings.LENS_API_KEY}
            )
            self.client = Client(transport=transport, fetch_schema_from_transport=True)
            return True
        except Exception as e:
            # Lazy %-formatting: the message is only rendered if emitted.
            self.logger.error("Failed to initialize Lens connection: %s", e)
            return False

    def get_profile(self, profile_id: Optional[str] = None) -> Optional[Dict]:
        """
        Retrieve profile information for a given profile ID.

        Args:
            profile_id (Optional[str]): Lens Protocol profile ID. If None,
                uses the default from settings.

        Returns:
            Optional[Dict]: Profile information (id, handle, bio, followers,
                following) or None on failure.
        """
        if not self.client:
            # _initialize_connection runs in __init__; if client is still
            # None the configuration (API key) must be fixed first.
            self.logger.error("Client not initialized; check Lens API key configuration")
            return None

        target_id = profile_id or self.profile_id
        if not target_id:
            self.logger.error("No profile ID provided or configured in settings")
            return None

        try:
            query = gql("""
                query Profile($id: ProfileId!) {
                    profile(request: { profileId: $id }) {
                        id
                        handle
                        bio
                        stats {
                            totalFollowers
                            totalFollowing
                        }
                    }
                }
            """)

            result = self.client.execute(query, variable_values={"id": target_id})
            profile = result["profile"]

            return {
                "id": profile["id"],
                "handle": profile["handle"],
                "bio": profile["bio"],
                "followers": profile["stats"]["totalFollowers"],
                "following": profile["stats"]["totalFollowing"],
            }
        except Exception as e:
            self.logger.error("Failed to retrieve profile: %s", e)
            return None

    def publish_content(self, content: str, profile_id: Optional[str] = None) -> Optional[Dict]:
        """
        Publish content to the Lens network.

        Args:
            content (str): Content to publish.
            profile_id (Optional[str]): Profile ID to publish under. If None,
                uses the default from settings.

        Returns:
            Optional[Dict]: Publication details (id, content, timestamp) or
                None if publishing failed.
        """
        if not self.client:
            self.logger.error("Client not initialized; check Lens API key configuration")
            return None

        target_id = profile_id or self.profile_id
        if not target_id:
            self.logger.error("No profile ID provided or configured in settings")
            return None

        try:
            mutation = gql("""
                mutation CreatePost($request: CreatePublicPostRequest!) {
                    createPostTypedData(request: $request) {
                        id
                        content
                        createdAt
                    }
                }
            """)

            variables = {"request": {"profileId": target_id, "content": content}}

            result = self.client.execute(mutation, variable_values=variables)
            publication = result["createPostTypedData"]

            return {
                "id": publication["id"],
                "content": publication["content"],
                "timestamp": publication["createdAt"],
            }
        except Exception as e:
            self.logger.error("Failed to publish content: %s", e)
            return None

    def fetch_content(self, query_params: Dict) -> List[Dict]:
        """
        Fetch content from the Lens network based on query parameters.

        Args:
            query_params (Dict): Parameters for the explorePublications
                request (e.g. orderBy, limit).

        Returns:
            List[Dict]: Publications matching the query; empty list on
                failure or when the client is not initialized.
        """
        if not self.client:
            self.logger.error("Client not initialized; check Lens API key configuration")
            return []

        try:
            query = gql("""
                query ExplorePublications($request: ExplorePublicationRequest!) {
                    explorePublications(request: $request) {
                        items {
                            ... on Post {
                                id
                                profile {
                                    id
                                }
                                metadata {
                                    content
                                }
                                createdAt
                                stats {
                                    totalAmountOfComments
                                    totalAmountOfMirrors
                                    totalAmountOfReactions
                                }
                            }
                        }
                    }
                }
            """)

            result = self.client.execute(query, variable_values={"request": query_params})
            publications = result["explorePublications"]["items"]

            return [
                {
                    "id": pub["id"],
                    "profile_id": pub["profile"]["id"],
                    "content": pub["metadata"]["content"],
                    "timestamp": pub["createdAt"],
                    "stats": {
                        "comments": pub["stats"]["totalAmountOfComments"],
                        "mirrors": pub["stats"]["totalAmountOfMirrors"],
                        "reactions": pub["stats"]["totalAmountOfReactions"],
                    },
                }
                for pub in publications
            ]
        except Exception as e:
            self.logger.error("Failed to fetch content: %s", e)
            return []
"default_profile_id" + + def test_initialization_failure(self, mock_settings, mock_gql_client): + """Test initialization failure""" + mock_settings.LENS_API_KEY = "" + + lens_tool = LensProtocolTool() + + assert lens_tool.client is None + + def test_get_profile_success(self, mock_settings): + """Test successful profile retrieval""" + lens_tool = LensProtocolTool() + # Mock GraphQL response + mock_client = Mock() + mock_client.execute.return_value = { + "profile": { + "id": "test.lens", + "handle": "test_handle", + "bio": "Test bio", + "stats": {"totalFollowers": 100, "totalFollowing": 50}, + } + } + lens_tool.client = mock_client + + result = lens_tool.get_profile("test.lens") + + assert result == { + "id": "test.lens", + "handle": "test_handle", + "bio": "Test bio", + "followers": 100, + "following": 50, + } + + def test_get_profile_default_id(self, mock_settings): + """Test profile retrieval with default profile ID""" + lens_tool = LensProtocolTool() + mock_client = Mock() + mock_client.execute.return_value = { + "profile": { + "id": "default_profile_id", + "handle": "test_handle", + "bio": "Test bio", + "stats": {"totalFollowers": 100, "totalFollowing": 50}, + } + } + lens_tool.client = mock_client + + result = lens_tool.get_profile() + + assert result is not None + assert result["id"] == "default_profile_id" + mock_client.execute.assert_called_once() + + def test_get_profile_not_initialized(self, mock_settings): + """Test profile retrieval with uninitialized client""" + lens_tool = LensProtocolTool() + lens_tool.client = None + + result = lens_tool.get_profile("test.lens") + + assert result is None + + def test_publish_content_success(self, mock_settings): + """Test successful content publication""" + lens_tool = LensProtocolTool() + # Mock GraphQL response + mock_client = Mock() + mock_client.execute.return_value = { + "createPostTypedData": { + "id": "pub-123", + "content": "Test content", + "createdAt": "2024-01-01T12:00:00Z", + } + } + lens_tool.client = 
mock_client + + result = lens_tool.publish_content("Test content") + + assert result == { + "id": "pub-123", + "content": "Test content", + "timestamp": "2024-01-01T12:00:00Z", + } + + def test_publish_content_failure(self, mock_settings): + """Test content publication failure""" + lens_tool = LensProtocolTool() + mock_client = Mock() + mock_client.execute.side_effect = Exception("Publication failed") + lens_tool.client = mock_client + + result = lens_tool.publish_content("Test content") + + assert result is None + + def test_fetch_content_success(self, mock_settings): + """Test successful content fetching""" + lens_tool = LensProtocolTool() + # Mock GraphQL response + mock_client = Mock() + mock_client.execute.return_value = { + "explorePublications": { + "items": [ + { + "id": "pub-123", + "profile": {"id": "profile-123"}, + "metadata": {"content": "Test content"}, + "createdAt": "2024-01-01T12:00:00Z", + "stats": { + "totalAmountOfComments": 10, + "totalAmountOfMirrors": 5, + "totalAmountOfReactions": 20, + }, + } + ] + } + } + lens_tool.client = mock_client + + query_params = {"limit": 1, "orderBy": "TOP_REACTED"} + result = lens_tool.fetch_content(query_params) + + assert len(result) == 1 + assert result[0] == { + "id": "pub-123", + "profile_id": "profile-123", + "content": "Test content", + "timestamp": "2024-01-01T12:00:00Z", + "stats": {"comments": 10, "mirrors": 5, "reactions": 20}, + } + + def test_fetch_content_empty_result(self, mock_settings): + """Test content fetching with empty result""" + lens_tool = LensProtocolTool() + mock_client = Mock() + mock_client.execute.return_value = {"explorePublications": {"items": []}} + lens_tool.client = mock_client + + query_params = {"limit": 1} + result = lens_tool.fetch_content(query_params) + + assert result == [] + + def test_fetch_content_not_initialized(self, mock_settings): + """Test content fetching with uninitialized client""" + lens_tool = LensProtocolTool() + lens_tool.client = None + + query_params = 
{"limit": 1} + result = lens_tool.fetch_content(query_params) + + assert result == [] From e465c0db8c761dccffeba6894c262572547469f9 Mon Sep 17 00:00:00 2001 From: Andriy Kogan Date: Sat, 18 Jan 2025 07:58:10 +0100 Subject: [PATCH 2/5] Minor bug fixes --- Pipfile | 4 ++-- Pipfile.lock | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Pipfile b/Pipfile index b1c9895..289075a 100644 --- a/Pipfile +++ b/Pipfile @@ -15,8 +15,6 @@ rapidfuzz = "*" python-telegram-bot = "*" telegramify-markdown = "*" tweepy = "*" -torch = "*" -transformers = "*" discord = "*" slack-sdk = "*" lenspy = "*" @@ -42,6 +40,8 @@ anthropic = "*" chromadb = "*" pytest = "*" ruff = "*" +torch = ">=2.2.0" +transformers = ">=4.38.0" [dev-packages] mypy = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 349cdc7..4d754b2 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "d3d35671a55a4e80d2393509f820c5d4f1f7f3c4c1b03d2e56b3c897af4b5e8c" + "sha256": "2e6b8621eaa238f9d824e4b08c2cec6aadfd189968ef9ee50c04e68aa43164f6" }, "pipfile-spec": 6, "requires": { From 048aed3696f454f639773e2e5f01c161af01e9ab Mon Sep 17 00:00:00 2001 From: Andriy Kogan Date: Sat, 18 Jan 2025 09:00:18 +0100 Subject: [PATCH 3/5] Minor bug fixes --- Pipfile | 2 +- Pipfile.lock | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Pipfile b/Pipfile index 289075a..75af12e 100644 --- a/Pipfile +++ b/Pipfile @@ -37,7 +37,7 @@ mkdocs-material = "*" mkdocs-macros-plugin = "*" mkdocs-awesome-pages-plugin = "*" anthropic = "*" -chromadb = "*" +chromadb = {version = "==0.6.3", extras = ["cpu"]} pytest = "*" ruff = "*" torch = ">=2.2.0" diff --git a/Pipfile.lock b/Pipfile.lock index 4d754b2..c33d7bd 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "2e6b8621eaa238f9d824e4b08c2cec6aadfd189968ef9ee50c04e68aa43164f6" + "sha256": "4d469a814d891a7a0d4fd5280f3b90d732f28ca789f04be61f9a1d4b0ca41209" }, "pipfile-spec": 6, 
"requires": { @@ -398,11 +398,13 @@ "version": "==0.7.6" }, "chromadb": { + "extras": [ + "cpu" + ], "hashes": [ "sha256:4851258489a3612b558488d98d09ae0fe0a28d5cad6bd1ba64b96fdc419dc0e5", "sha256:c8f34c0b704b9108b04491480a36d42e894a960429f87c6516027b5481d59ed3" ], - "index": "pypi", "markers": "python_version >= '3.9'", "version": "==0.6.3" }, From 1612680b35de595479f6cbab14a9d7209a572360 Mon Sep 17 00:00:00 2001 From: Andriy Kogan Date: Sat, 18 Jan 2025 11:57:34 +0100 Subject: [PATCH 4/5] Minor bug fixes --- Pipfile | 9 +- Pipfile.lock | 199 ++------------------------------- docs/agent/lens_protocol.md | 214 ++++++++++-------------------------- 3 files changed, 73 insertions(+), 349 deletions(-) diff --git a/Pipfile b/Pipfile index 75af12e..6fd9085 100644 --- a/Pipfile +++ b/Pipfile @@ -37,11 +37,10 @@ mkdocs-material = "*" mkdocs-macros-plugin = "*" mkdocs-awesome-pages-plugin = "*" anthropic = "*" -chromadb = {version = "==0.6.3", extras = ["cpu"]} +chromadb = "*" pytest = "*" ruff = "*" -torch = ">=2.2.0" -transformers = ">=4.38.0" + [dev-packages] mypy = "*" @@ -50,8 +49,8 @@ pytest = "*" pytest-asyncio = "*" pytest-cov = "*" ruff = "*" -torch = ">=2.2.0" -transformers = ">=4.38.0" +torch = {version = "*", index = "https://download.pytorch.org/whl/cpu"} +transformers = {version = ">=4.38.0"} [requires] python_version = "3.12" diff --git a/Pipfile.lock b/Pipfile.lock index c33d7bd..58ae5f4 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "4d469a814d891a7a0d4fd5280f3b90d732f28ca789f04be61f9a1d4b0ca41209" + "sha256": "a3cf29913309534460cf7461fd5d247f3546b22077fda6c464e08b7907fa935c" }, "pipfile-spec": 6, "requires": { @@ -398,13 +398,11 @@ "version": "==0.7.6" }, "chromadb": { - "extras": [ - "cpu" - ], "hashes": [ "sha256:4851258489a3612b558488d98d09ae0fe0a28d5cad6bd1ba64b96fdc419dc0e5", "sha256:c8f34c0b704b9108b04491480a36d42e894a960429f87c6516027b5481d59ed3" ], + "index": "pypi", "markers": "python_version >= 
'3.9'", "version": "==0.6.3" }, @@ -1676,14 +1674,6 @@ "markers": "python_version >= '3.5'", "version": "==1.6.0" }, - "networkx": { - "hashes": [ - "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", - "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f" - ], - "markers": "python_version >= '3.10'", - "version": "==3.4.2" - }, "numpy": { "hashes": [ "sha256:059e6a747ae84fce488c3ee397cee7e5f905fd1bda5fb18c66bc41807ff119b2", @@ -1746,111 +1736,6 @@ "markers": "python_version >= '3.10'", "version": "==2.2.1" }, - "nvidia-cublas-cu12": { - "hashes": [ - "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", - "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", - "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc" - ], - "markers": "python_version >= '3'", - "version": "==12.4.5.8" - }, - "nvidia-cuda-cupti-cu12": { - "hashes": [ - "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922", - "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", - "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb" - ], - "markers": "python_version >= '3'", - "version": "==12.4.127" - }, - "nvidia-cuda-nvrtc-cu12": { - "hashes": [ - "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", - "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", - "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec" - ], - "markers": "python_version >= '3'", - "version": "==12.4.127" - }, - "nvidia-cuda-runtime-cu12": { - "hashes": [ - "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e", - "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", - "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3" - ], - "markers": "python_version >= '3'", - "version": "==12.4.127" - }, - "nvidia-cudnn-cu12": { - "hashes": [ - 
"sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", - "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a" - ], - "markers": "python_version >= '3'", - "version": "==9.1.0.70" - }, - "nvidia-cufft-cu12": { - "hashes": [ - "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", - "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b", - "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9" - ], - "markers": "python_version >= '3'", - "version": "==11.2.1.3" - }, - "nvidia-curand-cu12": { - "hashes": [ - "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", - "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", - "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771" - ], - "markers": "python_version >= '3'", - "version": "==10.3.5.147" - }, - "nvidia-cusolver-cu12": { - "hashes": [ - "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", - "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", - "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c" - ], - "markers": "python_version >= '3'", - "version": "==11.6.1.9" - }, - "nvidia-cusparse-cu12": { - "hashes": [ - "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f", - "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", - "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1" - ], - "markers": "python_version >= '3'", - "version": "==12.3.1.170" - }, - "nvidia-nccl-cu12": { - "hashes": [ - "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0" - ], - "markers": "python_version >= '3'", - "version": "==2.21.5" - }, - "nvidia-nvjitlink-cu12": { - "hashes": [ - "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", - "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", - 
"sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1" - ], - "markers": "python_version >= '3'", - "version": "==12.4.127" - }, - "nvidia-nvtx-cu12": { - "hashes": [ - "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485", - "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", - "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3" - ], - "markers": "python_version >= '3'", - "version": "==12.4.127" - }, "oauthlib": { "hashes": [ "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", @@ -2981,27 +2866,6 @@ "markers": "python_version >= '3.7'", "version": "==0.9.2" }, - "safetensors": { - "hashes": [ - "sha256:03c937100f38c9ff4c1507abea9928a6a9b02c9c1c9c3609ed4fb2bf413d4975", - "sha256:1506e4c2eda1431099cebe9abf6c76853e95d0b7a95addceaa74c6019c65d8cf", - "sha256:3ab696dfdc060caffb61dbe4066b86419107a24c804a4e373ba59be699ebd8d5", - "sha256:3dfa7c2f3fe55db34eba90c29df94bcdac4821043fc391cb5d082d9922013869", - "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2", - "sha256:46ff2116150ae70a4e9c490d2ab6b6e1b1b93f25e520e540abe1b81b48560c3a", - "sha256:5c5b5d9da594f638a259fca766046f44c97244cc7ab8bef161b3e80d04becc76", - "sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae", - "sha256:78abdddd03a406646107f973c7843276e7b64e5e32623529dc17f3d94a20f589", - "sha256:86016d40bcaa3bcc9a56cd74d97e654b5f4f4abe42b038c71e4f00a089c4526c", - "sha256:990833f70a5f9c7d3fc82c94507f03179930ff7d00941c287f73b6fcbf67f19e", - "sha256:a00e737948791b94dad83cf0eafc09a02c4d8c2171a239e8c8572fe04e25960e", - "sha256:cb4a8d98ba12fa016f4241932b1fc5e702e5143f5374bba0bbcf7ddc1c4cf2b8", - "sha256:d3a06fae62418ec8e5c635b61a8086032c9e281f16c63c3af46a6efbab33156f", - "sha256:fe55c039d97090d1f85277d402954dd6ad27f63034fa81985a9cc59655ac3ee2" - ], - "markers": "python_version >= '3.7'", - "version": "==0.5.2" - }, "setuptools": { "hashes": [ 
"sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", @@ -3077,11 +2941,11 @@ }, "sympy": { "hashes": [ - "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", - "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8" + "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73", + "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9" ], "markers": "python_version >= '3.8'", - "version": "==1.13.1" + "version": "==1.13.3" }, "tavily-python": { "hashes": [ @@ -3175,30 +3039,6 @@ "markers": "python_version >= '3.7'", "version": "==0.21.0" }, - "torch": { - "hashes": [ - "sha256:1f3b7fb3cf7ab97fae52161423f81be8c6b8afac8d9760823fd623994581e1a3", - "sha256:23d062bf70776a3d04dbe74db950db2a5245e1ba4f27208a87f0d743b0d06e86", - "sha256:31f8c39660962f9ae4eeec995e3049b5492eb7360dd4f07377658ef4d728fa4c", - "sha256:32a037bd98a241df6c93e4c789b683335da76a2ac142c0973675b715102dc5fa", - "sha256:340ce0432cad0d37f5a31be666896e16788f1adf8ad7be481196b503dad675b9", - "sha256:34bfa1a852e5714cbfa17f27c49d8ce35e1b7af5608c4bc6e81392c352dbc601", - "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", - "sha256:46c817d3ea33696ad3b9df5e774dba2257e9a4cd3c4a3afbf92f6bb13ac5ce2d", - "sha256:603c52d2fe06433c18b747d25f5c333f9c1d58615620578c326d66f258686f9a", - "sha256:71328e1bbe39d213b8721678f9dcac30dfc452a46d586f1d514a6aa0a99d4744", - "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", - "sha256:7974e3dce28b5a21fb554b73e1bc9072c25dde873fa00d54280861e7a009d7dc", - "sha256:8046768b7f6d35b85d101b4b38cba8aa2f3cd51952bc4c06a49580f2ce682291", - "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", - "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", - "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", - "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03" - ], - "index": "pypi", 
- "markers": "python_full_version >= '3.8.0'", - "version": "==2.5.1" - }, "tqdm": { "hashes": [ "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", @@ -3207,26 +3047,6 @@ "markers": "python_version >= '3.7'", "version": "==4.67.1" }, - "transformers": { - "hashes": [ - "sha256:03fdfcbfb8b0367fb6c9fbe9d1c9aa54dfd847618be9b52400b2811d22799cb1", - "sha256:6d3de6d71cb5f2a10f9775ccc17abce9620195caaf32ec96542bd2a6937f25b0" - ], - "index": "pypi", - "markers": "python_full_version >= '3.9.0'", - "version": "==4.48.0" - }, - "triton": { - "hashes": [ - "sha256:0f34f6e7885d1bf0eaaf7ba875a5f0ce6f3c13ba98f9503651c1e6dc6757ed5c", - "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", - "sha256:6dadaca7fc24de34e180271b5cf864c16755702e9f63a16f62df714a8099126a", - "sha256:aafa9a20cd0d9fee523cd4504aa7131807a864cd77dcf6efe7e981f18b8c6c11", - "sha256:c8182f42fd8080a7d39d666814fa36c5e30cc00ea7eeeb1a2983dbb4c99a0fdc" - ], - "markers": "platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'", - "version": "==3.1.0" - }, "tweepy": { "hashes": [ "sha256:1345cbcdf0a75e2d89f424c559fd49fda4d8cd7be25cd5131e3b57bad8a21d76", @@ -4554,11 +4374,11 @@ }, "sympy": { "hashes": [ - "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", - "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8" + "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73", + "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9" ], "markers": "python_version >= '3.8'", - "version": "==1.13.1" + "version": "==1.13.3" }, "tokenizers": { "hashes": [ @@ -4601,7 +4421,7 @@ "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03" ], - "index": "pypi", + "index": "https://download.pytorch.org/whl/cpu", "markers": "python_full_version >= '3.8.0'", "version": "==2.5.1" }, @@ -4618,7 
+4438,6 @@ "sha256:03fdfcbfb8b0367fb6c9fbe9d1c9aa54dfd847618be9b52400b2811d22799cb1", "sha256:6d3de6d71cb5f2a10f9775ccc17abce9620195caaf32ec96542bd2a6937f25b0" ], - "index": "pypi", "markers": "python_full_version >= '3.9.0'", "version": "==4.48.0" }, diff --git a/docs/agent/lens_protocol.md b/docs/agent/lens_protocol.md index 3e4c69f..7afee48 100644 --- a/docs/agent/lens_protocol.md +++ b/docs/agent/lens_protocol.md @@ -1,172 +1,78 @@ -# Llama Integration - -The agent supports local inference using Meta's Llama models (8B, 70B, and 405B parameters) for scenarios requiring on-premise or self-hosted language models. +# Lens Protocol Integration ## Setup -1. Download Llama Model - - Obtain access to Meta's Llama models through [Meta's AI website](https://ai.meta.com/llama/) - - Download your preferred model size: - - Llama-8B (16GB minimum RAM) - - Llama-70B (140GB minimum RAM) - - Llama-405B (780GB minimum RAM) +1. Enable Lens Protocol API Access + - Go to [Lens Protocol Developer Portal](https://docs.lens.xyz/docs/developer-quickstart) + - Create a developer account + - Navigate to API Keys section -2. Install Dependencies - ```bash - pip install torch>=2.2.0 transformers>=4.38.0 - ``` +2. Generate API Key + - In the Developer Portal, create a new API key + - Copy the generated API key + - (Optional) Set API key restrictions and rate limits 3. 
Configure Environment Variables Add these to your `.env` file: ```bash - LLAMA_MODEL_PATH=/path/to/your/llama/model - LLM_PROVIDER=llama + LENS_API_KEY=your_api_key_here + LENS_PROFILE_ID=your_profile_id ``` ### Basic Setup ```python -from src.llm.llm import LLM - -# Initialize LLM with Llama backend -llm = LLM() # Will use Llama if LLM_PROVIDER=llama - -# Generate a response -response = await llm.generate_response([ - {"role": "user", "content": "Explain quantum computing"} -]) -``` - -## Model Selection - -Choose the appropriate model size based on your requirements: - -### Llama-8B -- Minimum Requirements: - - 16GB RAM - - 20GB disk space -- Best for: - - Basic text generation - - Simple chat interactions - - Resource-constrained environments - -### Llama-70B -- Minimum Requirements: - - 140GB RAM - - 140GB disk space -- Best for: - - Complex reasoning - - Code generation - - Advanced chat applications - -### Llama-405B -- Minimum Requirements: - - 780GB RAM - - 800GB disk space -- Best for: - - Research applications - - Maximum model capability - - Enterprise-scale deployments - -## Advanced Configuration - -### Memory Management -The agent automatically manages model loading based on available resources: -```python -# Override default device mapping -response = await llm.generate_response( - messages=[{"role": "user", "content": "Hello"}], - device_map="cpu" # Force CPU inference -) - -# Control memory usage -response = await llm.generate_response( - messages=[{"role": "user", "content": "Hello"}], - max_tokens=512, # Limit response length - torch_dtype="auto" # Automatic precision selection -) -``` - -### Generation Parameters -Customize response generation: -```python -# Creative writing -response = await llm.generate_response( - messages=[{"role": "user", "content": "Write a story"}], - temperature=0.9, # More creative - top_p=0.95 -) - -# Factual responses -response = await llm.generate_response( - messages=[{"role": "user", "content": "Explain TCP/IP"}], 
- temperature=0.2, # More focused - top_p=0.1 +from src.tools.lens_protocol import LensProtocolTool + +# Initialize Lens Protocol client +lens = LensProtocolTool() + +# Connect to Lens Protocol +auth_credentials = { + 'api_key': 'your_api_key_here' +} +lens.initialize_connection(auth_credentials) + +# Get profile information +profile = lens.get_profile("lens.dev") + +# Fetch recent content +publications = lens.fetch_content({ + 'limit': 5, + 'sort': 'DESC' +}) + +# Publish content +result = lens.publish_content( + profile_id="your_profile_id", + content="Hello Lens Protocol!" ) ``` -## Example Use Cases - -### Chat Application -```python -messages = [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "What is the capital of France?"} -] -response = await llm.generate_response(messages) -``` - -### Code Generation -```python -messages = [ - {"role": "system", "content": "You are a Python expert."}, - {"role": "user", "content": "Write a function to calculate Fibonacci numbers."} -] -response = await llm.generate_response( - messages, - temperature=0.2 # Lower temperature for code generation -) -``` - -## Best Practices - -1. **Resource Management** - - Monitor system memory usage - - Use appropriate model size for your hardware - - Consider CPU fallback for large models - -2. **Performance Optimization** - - Use GPU acceleration when available - - Adjust batch sizes based on memory constraints - - Cache frequently used responses - -3. **Error Handling** - - Implement proper validation of model files - - Handle out-of-memory scenarios gracefully - - Monitor model performance and errors - -## Troubleshooting - -Common issues and solutions: - -1. **Out of Memory Errors** - - Switch to a smaller model - - Use CPU fallback - - Reduce batch size or context length - -2. **Missing Model Files** - - Verify model path in environment variables - - Check file permissions - - Ensure all required model files are present - -3. 
**Poor Performance** - - Check GPU utilization - - Monitor system resources - - Adjust generation parameters +## Features +- Profile information retrieval and management +- Content publication to the Lens network +- Content exploration and fetching with custom parameters +- Access to social metrics (followers, following, reactions) +- Publication statistics (comments, mirrors, reactions) +- GraphQL-based API integration +- Error handling and logging + +## TODOs for Future Enhancements: +- Add support for authentication and profile management +- Implement follow/unfollow functionality +- Add comment creation and management +- Support for mirroring content +- Implement content moderation features +- Add support for media attachments +- Implement notification handling +- Add support for collecting publications +- Implement profile search functionality +- Add support for encrypted direct messaging ## Reference -For implementation details, see: `src/llm/providers/llama.py` +For implementation details, see: `src/tools/lens_protocol.py` -For more information, refer to: -- [Llama Model Documentation](https://ai.meta.com/llama/) -- [Hugging Face Transformers](https://huggingface.co/docs/transformers/index) -- [PyTorch Documentation](https://pytorch.org/docs/) +The implementation uses the Lens Protocol API v2. 
For more information, refer to: +- [Lens Protocol Documentation](https://docs.lens.xyz/) +- [Lens Protocol API Reference](https://docs.lens.xyz/docs/api-basics) +- [GraphQL Schema Documentation](https://docs.lens.xyz/docs/authentication-quickstart) \ No newline at end of file From 78ad50c5da444e64368a00613c0616397fcee72d Mon Sep 17 00:00:00 2001 From: Andriy Kogan Date: Sat, 18 Jan 2025 12:25:46 +0100 Subject: [PATCH 5/5] Disabling GPU libraries --- Pipfile | 2 +- Pipfile.lock | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Pipfile b/Pipfile index 6fd9085..b4531c2 100644 --- a/Pipfile +++ b/Pipfile @@ -49,7 +49,7 @@ pytest = "*" pytest-asyncio = "*" pytest-cov = "*" ruff = "*" -torch = {version = "*", index = "https://download.pytorch.org/whl/cpu"} +torch = {version = "*", index = "pypi", extras = ["cpu"]} transformers = {version = ">=4.38.0"} [requires] diff --git a/Pipfile.lock b/Pipfile.lock index 58ae5f4..aabe5b7 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "a3cf29913309534460cf7461fd5d247f3546b22077fda6c464e08b7907fa935c" + "sha256": "c8b3d9f16eb97bbeb9c4bac67dde73b46f7f49b644edba1649af5abb68420b17" }, "pipfile-spec": 6, "requires": { @@ -4402,6 +4402,9 @@ "version": "==0.21.0" }, "torch": { + "extras": [ + "cpu" + ], "hashes": [ "sha256:1f3b7fb3cf7ab97fae52161423f81be8c6b8afac8d9760823fd623994581e1a3", "sha256:23d062bf70776a3d04dbe74db950db2a5245e1ba4f27208a87f0d743b0d06e86", @@ -4421,7 +4424,7 @@ "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03" ], - "index": "https://download.pytorch.org/whl/cpu", + "index": "pypi", "markers": "python_full_version >= '3.8.0'", "version": "==2.5.1" },