-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: requirements-dev.lock
140 lines (139 loc) · 2.92 KB
/
requirements-dev.lock
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: true
# with-sources: false
# generate-hashes: false
-e file:.
ai-edge-model-explorer==0.1.7
# via model-explorer-refiners
ai-edge-model-explorer-adapter==0.1.3
# via ai-edge-model-explorer
asttokens==2.4.1
# via stack-data
blinker==1.8.2
# via flask
certifi==2024.6.2
# via requests
charset-normalizer==3.3.2
# via requests
click==8.1.7
# via flask
decorator==5.1.1
# via ipython
executing==2.0.1
# via stack-data
filelock==3.15.4
# via torch
flask==3.0.3
# via ai-edge-model-explorer
fsspec==2024.6.1
# via torch
idna==3.7
# via requests
ipython==8.26.0
# via ai-edge-model-explorer
itsdangerous==2.2.0
# via flask
jaxtyping==0.2.31
# via refiners
jedi==0.19.1
# via ipython
jinja2==3.1.4
# via flask
# via torch
markupsafe==2.1.5
# via jinja2
# via werkzeug
matplotlib-inline==0.1.7
# via ipython
mpmath==1.3.0
# via sympy
networkx==3.3
# via torch
numpy==2.0.0
# via ai-edge-model-explorer
# via refiners
nvidia-cublas-cu12==12.1.3.1
# via nvidia-cudnn-cu12
# via nvidia-cusolver-cu12
# via torch
nvidia-cuda-cupti-cu12==12.1.105
# via torch
nvidia-cuda-nvrtc-cu12==12.1.105
# via torch
nvidia-cuda-runtime-cu12==12.1.105
# via torch
nvidia-cudnn-cu12==8.9.2.26
# via torch
nvidia-cufft-cu12==11.0.2.54
# via torch
nvidia-curand-cu12==10.3.2.106
# via torch
nvidia-cusolver-cu12==11.4.5.107
# via torch
nvidia-cusparse-cu12==12.1.0.106
# via nvidia-cusolver-cu12
# via torch
nvidia-nccl-cu12==2.20.5
# via torch
nvidia-nvjitlink-cu12==12.5.40
# via nvidia-cusolver-cu12
# via nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.1.105
# via torch
packaging==24.1
# via ai-edge-model-explorer
# via refiners
parso==0.8.4
# via jedi
pexpect==4.9.0
# via ipython
pillow==10.3.0
# via refiners
portpicker==1.6.0
# via ai-edge-model-explorer
prompt-toolkit==3.0.47
# via ipython
psutil==6.0.0
# via portpicker
ptyprocess==0.7.0
# via pexpect
pure-eval==0.2.2
# via stack-data
pygments==2.18.0
# via ipython
refiners @ git+https://github.com/finegrain-ai/refiners@e091788b885c5024b82e18cef082a922cd050481
# via model-explorer-refiners
requests==2.32.3
# via ai-edge-model-explorer
safetensors==0.4.3
# via refiners
six==1.16.0
# via asttokens
stack-data==0.6.3
# via ipython
sympy==1.12.1
# via torch
termcolor==2.4.0
# via ai-edge-model-explorer
torch==2.3.1
# via ai-edge-model-explorer
# via refiners
traitlets==5.14.3
# via ipython
# via matplotlib-inline
typeguard==2.13.3
# via jaxtyping
typing-extensions==4.12.2
# via ai-edge-model-explorer
# via torch
urllib3==2.2.2
# via requests
wcwidth==0.2.13
# via prompt-toolkit
werkzeug==3.0.3
# via flask