Paul semester project #2

Status: Open · wants to merge 39 commits into base: stable

39 commits
d45adc3
Change all newlines to LF, add basics to gitignore and get running
PaulEibensteiner Oct 5, 2024
cb56a1d
some comments
PaulEibensteiner Oct 10, 2024
b4d641b
Upgrade libraries, remove .egg-info file
PaulEibensteiner Oct 11, 2024
9e64b58
format code with black
PaulEibensteiner Oct 11, 2024
605d3ec
rename poissonRateEstimator variables
PaulEibensteiner Oct 11, 2024
e29a590
choose optimization library in fit method
PaulEibensteiner Oct 12, 2024
08b1a09
revert to mosek
PaulEibensteiner Oct 12, 2024
605f52a
remove weird duplicate handling
PaulEibensteiner Oct 12, 2024
c85b6a4
most minor type fixes
PaulEibensteiner Oct 12, 2024
1f735cc
Optimize objective and add option to move to gpu
PaulEibensteiner Oct 12, 2024
e37f339
Efficient Triangle embedding class
PaulEibensteiner Oct 13, 2024
ded12a2
Make comment strings raw if they contain a backslash
PaulEibensteiner Oct 17, 2024
a9d2e01
more data type robustness
PaulEibensteiner Nov 10, 2024
fb26885
link_fun_rate running
PaulEibensteiner Nov 10, 2024
5f062a3
fix efficient triangle basis test
PaulEibensteiner Nov 10, 2024
111a717
precompute phis with custom basis, comments and sampling as list
PaulEibensteiner Nov 10, 2024
2b0631a
fix link_fun_rate i.e. implement integral approximation
PaulEibensteiner Nov 21, 2024
9470fe3
precompute even without using bins
PaulEibensteiner Nov 21, 2024
918b2cd
slight doc update
PaulEibensteiner Nov 21, 2024
5ee811a
add kernel integral for rbf
PaulEibensteiner Nov 21, 2024
9cfacaf
make numpy torch conversions device-safe
PaulEibensteiner Dec 1, 2024
0e035a6
improve posterior sampling print statements
PaulEibensteiner Dec 1, 2024
f83af69
Fix the squared exp integral by using gamma correctly (usually sigma)
PaulEibensteiner Dec 1, 2024
0ca798d
fix integral scaling in ExpGaussProcessRateEstimator
PaulEibensteiner Dec 1, 2024
1fbe527
enable .fit on PoissonRateEstimator
PaulEibensteiner Dec 1, 2024
42d7b7e
Only run bucketization if self.dual
PaulEibensteiner Dec 1, 2024
b335dc8
Naive integral implementation for PPP for more efficient reference
PaulEibensteiner Dec 1, 2024
b662e41
Implement roi for optimal basis and fix interpolation outside convex set
PaulEibensteiner Dec 8, 2024
fce9b5f
fix optimal basis by norming over the right dimension
PaulEibensteiner Jan 23, 2025
b8b99d6
add truncated gp class
PaulEibensteiner Feb 2, 2025
1a091a6
optimizations: GPU sampling, kmeans subsampling, grid subsampling, gp…
PaulEibensteiner Feb 2, 2025
7696ad7
spelling fix
PaulEibensteiner Feb 2, 2025
48d6abe
voxel size binary search
PaulEibensteiner Feb 2, 2025
9c27752
add new dependencies
PaulEibensteiner Feb 2, 2025
dda4e06
use same device and data type class-wide
PaulEibensteiner Feb 2, 2025
1919237
enable single process interpolation
PaulEibensteiner Feb 4, 2025
24736be
refactor GPU usage and update NMF parameters for optimal performance
PaulEibensteiner Feb 4, 2025
d44a239
precomp naming fix
PaulEibensteiner Feb 8, 2025
f2b2b53
add logcox process
PaulEibensteiner Feb 8, 2025
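
Note on commits 5ee811a and f83af69: the integral of a squared-exponential kernel over an interval has a closed form in terms of the error function. A minimal sketch, assuming the parametrization k(x, y) = exp(-(x - y)^2 / (2 gamma^2)); the exact convention used in stpy may differ:

import numpy as np
from scipy.integrate import quad
from scipy.special import erf


def rbf_kernel_integral(x, a, b, gamma):
    # Closed form of the integral of exp(-(x - y)^2 / (2 gamma^2)) over y in [a, b]
    c = np.sqrt(2.0) * gamma
    return gamma * np.sqrt(np.pi / 2.0) * (erf((b - x) / c) - erf((a - x) / c))


# Sanity check against numerical quadrature
numeric, _ = quad(lambda y: np.exp(-((0.3 - y) ** 2) / (2 * 0.5**2)), 0.0, 1.0)
assert abs(rbf_kernel_integral(0.3, 0.0, 1.0, 0.5) - numeric) < 1e-8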
41 changes: 41 additions & 0 deletions .gitignore
@@ -4,3 +4,44 @@ sensepy/__pycache__
 *.txt
 *.png
 *.pdf
+*.pyo
+*.pyd
+*.pdb
+*.egg
+*.egg-info
+*.whl
+*.manifest
+*.spec
+*.log
+*.pot
+*.mo
+*.so
+*.dll
+*.dylib
+*.a
+*.lib
+*.swp
+*.swo
+*.tmp
+*.bak
+*.old
+*.orig
+*.rej
+*.sublime-project
+*.sublime-workspace
+*.project
+*.pydevproject
+*.idea/
+.vscode/
+__pycache__/
+*.coverage
+.coverage.*
+.cache
+.tox/
+.nox/
+.pytest_cache/
+htmlcov/
+dist/
+build/
+site/
+docs/_build/
54 changes: 30 additions & 24 deletions setup.py
@@ -1,29 +1,35 @@
 from setuptools import setup

 packages = [
-    'numpy',
-    'scipy',
-    'matplotlib',
-    'sklearn',
-    'tensorflow',
-    'cvxpy',
-    'torch',
-    'pymanopt',
-    'pandas',
-    'mosek',
-    'quadprog',
-    'cvxpylayers',
-    'functorch',
-    'autograd_minimize'
+    "numpy",
+    "scipy",
+    "matplotlib",
+    "scikit-learn",
+    "tensorflow",
+    "cvxpy",
+    "torch",
+    "pymanopt",
+    "pandas",
+    "mosek",
+    "quadprog",
+    "cvxpylayers",
+    "autograd_minimize",
+    "torch-cluster",
+    "nmf-torch",
+    "fast-pytorch-kmeans",
+    "tqdm",
 ]
 #
-setup(name='stpy',
-      version='0.0.2',
-      description='Stochastic Process Library for Python',
-      url='',
-      author='Mojmir Mutny',
-      author_email='mojmir.mutny@inf.ethz.ch',
-      license='custom ',
-      packages=['stpy'],
-      zip_safe=False,
-      install_requires=packages)
+setup(
+    name="stpy",
+    version="0.0.2",
+    description="Stochastic Process Library for Python",
+    url="",
+    author="Mojmir Mutny",
+    author_email="mojmir.mutny@inf.ethz.ch",
+    license="custom ",
+    packages=["stpy"],
+    zip_safe=False,
+    install_requires=packages,
+    setup_requires=["torch", "Cython"],
+)
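
Note on the dependency rename in the list above: the distribution on PyPI is named scikit-learn, while the import name stays sklearn; the old 'sklearn' entry resolves to a deprecated stub package that current pip installs refuse. For illustration:

# pip install scikit-learn   (not: pip install sklearn)
import sklearn

print(sklearn.__version__)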
8 changes: 0 additions & 8 deletions stpy.egg-info/PKG-INFO

This file was deleted.

1 change: 0 additions & 1 deletion stpy.egg-info/not-zip-safe

This file was deleted.

98 changes: 51 additions & 47 deletions stpy/approx_inference/expected-propagation.py
@@ -3,63 +3,67 @@
 from scipy.stats import multivariate_normal


-class ExpectedPropagationQuadratic():
+class ExpectedPropagationQuadratic:

-    def __init__(self, mu_prior, Sigma_prior, likelihood_single, data):
+    def __init__(self, mu_prior, Sigma_prior, likelihood_single, data):

-        # takes two arguments param, theta
-        self.likelihood_single = likelihood_single
+        # takes two arguments param, theta
+        self.likelihood_single = likelihood_single

-        # prior information
-        self.mu_prior = mu_prior
-        self.Sigma_prior = Sigma_prior
+        # prior information
+        self.mu_prior = mu_prior
+        self.Sigma_prior = Sigma_prior

-        self.d = mu_prior.size()[1]
+        self.d = mu_prior.size()[1]

-        self.n = len(self.data)
-        self.data = data
+        self.n = len(self.data)
+        self.data = data

-        self.approx = []
-        for i in range(self.n):
-            mu = torch.zeros(size=(1, self.d)).double()
-            Sigma = torch.eye(size=(self.d, self.d)).double()
-            self.approx.append((mu, Sigma))
+        self.approx = []
+        for i in range(self.n):
+            mu = torch.zeros(size=(1, self.d)).double()
+            Sigma = torch.eye(size=(self.d, self.d)).double()
+            self.approx.append((mu, Sigma))

-    def marginalized_version(self, j):
-        mu = torch.zeros(size=(1, self.d)).double()
-        Sigma = torch.zeros(size=(self.d, self.d)).double()
+    def marginalized_version(self, j):
+        mu = torch.zeros(size=(1, self.d)).double()
+        Sigma = torch.zeros(size=(self.d, self.d)).double()

-        for i in range(self.n):
-            if i != j:
-                Sigma_elem = self.approx[j][0]
-                mu_elem = self.approx[j][1]
-                Sigma_elem_inv = torch.inverse(Sigma_elem)
-                mu += Sigma_elem_inv @ mu_elem
-                Sigma += Sigma_elem_inv
-        Sigma = torch.inverse(Sigma)
-        mu = Sigma @ mu
-        return (mu, Sigma)
+        for i in range(self.n):
+            if i != j:
+                Sigma_elem = self.approx[j][0]
+                mu_elem = self.approx[j][1]
+                Sigma_elem_inv = torch.inverse(Sigma_elem)
+                mu += Sigma_elem_inv @ mu_elem
+                Sigma += Sigma_elem_inv
+        Sigma = torch.inverse(Sigma)
+        mu = Sigma @ mu
+        return (mu, Sigma)

-    def match_likelihood(self, j):
-        mu, Sigma = self.marginalized_version(j)
-        lik = lambda x: self.likelihood_single(torch.from_numpy(x), self.data[j]).numpy()
-        prob = lambda x: multivariate_normal.pdf(x, mean=mu.view(-1).reshape.numpy(), cov=Sigma.numpy())
-        first_moment = integrate.quad(lambda x: x * lik(x) * prob(x), 0.0, 10e10)
-        second_moment = integrate.quad(lambda x: x * x * lik(x) * prob(x), 0.0, 10e10)
+    def match_likelihood(self, j):
+        mu, Sigma = self.marginalized_version(j)
+        lik = lambda x: self.likelihood_single(
+            torch.from_numpy(x), self.data[j]
+        ).numpy()
+        prob = lambda x: multivariate_normal.pdf(
+            x, mean=mu.view(-1).reshape.numpy(), cov=Sigma.numpy()
+        )
+        first_moment = integrate.quad(lambda x: x * lik(x) * prob(x), 0.0, 10e10)
+        second_moment = integrate.quad(lambda x: x * x * lik(x) * prob(x), 0.0, 10e10)

-        self.approx[j][0] = first_moment
-        self.approx[j][1] = second_moment
+        self.approx[j][0] = first_moment
+        self.approx[j][1] = second_moment

-        return (first_moment, second_moment - first_moment ** 2)
+        return (first_moment, second_moment - first_moment**2)

-    def finalize(self):
-        pass
+    def finalize(self):
+        pass

-    def fit_gp(self, iterations='auto'):
-        if iterations == 'auto':
-            T = 100
-            for i in range(T):
-                for j in range(self.n):
-                    self.match_likelihood(j)
-            mu, Sigma = self.finalize()
-            return mu, Sigma
+    def fit_gp(self, iterations="auto"):
+        if iterations == "auto":
+            T = 100
+            for i in range(T):
+                for j in range(self.n):
+                    self.match_likelihood(j)
+            mu, Sigma = self.finalize()
+            return mu, Sigma
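
The black reformatting above is behavior-preserving, so a few pre-existing bugs survive it: __init__ reads self.n = len(self.data) before self.data is assigned, torch.eye takes the matrix size positionally rather than via a size= keyword, and mu.view(-1).reshape.numpy() in match_likelihood references .reshape without calling it (mu.view(-1).numpy() appears intended). A corrected sketch of the constructor, assuming approx should hold one (mu, Sigma) pair per data point:

import torch


class ExpectedPropagationQuadratic:
    def __init__(self, mu_prior, Sigma_prior, likelihood_single, data):
        # likelihood_single takes two arguments: param, theta
        self.likelihood_single = likelihood_single

        # prior information
        self.mu_prior = mu_prior
        self.Sigma_prior = Sigma_prior

        self.d = mu_prior.size()[1]

        self.data = data  # assign before taking its length
        self.n = len(self.data)

        self.approx = []
        for i in range(self.n):
            mu = torch.zeros(size=(1, self.d)).double()
            Sigma = torch.eye(self.d).double()  # torch.eye(n) builds an n x n identity
            self.approx.append((mu, Sigma))

The cavity computation in marginalized_version likewise indexes self.approx[j] inside a loop over i, where self.approx[i] appears intended, and the tuple order is swapped: as constructed, self.approx[i][0] is mu and self.approx[i][1] is Sigma.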
12 changes: 7 additions & 5 deletions stpy/approx_inference/hmc.py
@@ -1,5 +1,7 @@
-params_hmc = hamiltorch.sample(log_prob_func=log_prob_func,
-                               params_init=params_init,
-                               num_samples=num_samples,
-                               step_size=step_size,
-                               num_steps_per_sample=num_steps_per_sample)
+params_hmc = hamiltorch.sample(
+    log_prob_func=log_prob_func,
+    params_init=params_init,
+    num_samples=num_samples,
+    step_size=step_size,
+    num_steps_per_sample=num_steps_per_sample,
+)
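
As committed, hmc.py consists of just this call and assumes log_prob_func, params_init, and the step parameters are defined elsewhere in scope. A minimal, self-contained usage sketch with a standard-normal target; the target and the parameter values are illustrative only:

import torch
import hamiltorch


def log_prob_func(params):
    # log density of a standard normal, up to an additive constant
    return -0.5 * torch.sum(params**2)


params_init = torch.zeros(2)
params_hmc = hamiltorch.sample(
    log_prob_func=log_prob_func,
    params_init=params_init,
    num_samples=400,
    step_size=0.1,
    num_steps_per_sample=10,
)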
42 changes: 23 additions & 19 deletions stpy/approx_inference/langevin.py
@@ -3,25 +3,29 @@
 import torch


-class LangevinSampler():
+class LangevinSampler:

-    def __init__(self, verbose=False):
-        self.verbose = verbose
-        pass
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+        pass

-    def calculate(self, HessianF, theta0):
-        W = HessianF(theta0)
-        L = float(scipy.sparse.linalg.eigsh(W.numpy(), k=1, which='LM', return_eigenvectors=False, tol=1e-3))
-        return L
+    def calculate(self, HessianF, theta0):
+        W = HessianF(theta0)
+        L = float(
+            scipy.sparse.linalg.eigsh(
+                W.numpy(), k=1, which="LM", return_eigenvectors=False, tol=1e-3
+            )
+        )
+        return L

-    def sample(self, F, nablaF, HessianF, theta0, steps=100):
-        L = self.calculate(HessianF, theta0)
-        eta = 0.5 / (L + 1)
-        m = theta0.size()[0]
-        theta = theta0
-        for k in range(steps):
-            w = torch.randn(size=(m, 1)).double()
-            theta = theta - eta * nablaF(theta) + np.sqrt(2 * eta) * w
-            if self.verbose == True:
-                print("Iter:", k, theta.T)
-        return theta
+    def sample(self, F, nablaF, HessianF, theta0, steps=100):
+        L = self.calculate(HessianF, theta0)
+        eta = 0.5 / (L + 1)
+        m = theta0.size()[0]
+        theta = theta0
+        for k in range(steps):
+            w = torch.randn(size=(m, 1)).double()
+            theta = theta - eta * nablaF(theta) + np.sqrt(2 * eta) * w
+            if self.verbose == True:
+                print("Iter:", k, theta.T)
+        return theta
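
The sample loop is the unadjusted Langevin algorithm, theta_{k+1} = theta_k - eta * nablaF(theta_k) + sqrt(2 * eta) * w_k, with the step size eta set from the largest Hessian eigenvalue L at theta0 (a Lipschitz estimate for the gradient). A usage sketch on a quadratic potential F(theta) = 0.5 * theta^T A theta, where all three callbacks are exact; the values are illustrative only:

import torch

from stpy.approx_inference.langevin import LangevinSampler

A = torch.tensor([[2.0, 0.5], [0.5, 1.0]]).double()
F = lambda theta: 0.5 * theta.T @ A @ theta
nablaF = lambda theta: A @ theta
HessianF = lambda theta: A  # constant Hessian of a quadratic

sampler = LangevinSampler(verbose=False)
theta0 = torch.ones(size=(2, 1)).double()
draw = sampler.sample(F, nablaF, HessianF, theta0, steps=500)  # approximately N(0, A^{-1})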
29 changes: 17 additions & 12 deletions stpy/approx_inference/proximal_langevin.py
@@ -3,18 +3,23 @@


 def ProximalLangevin(LangevinSampler):
-    def sample(self, F, nablaF, HessianF, theta0, prox, steps=100):
-        L = self.calculate(HessianF, theta0)
-        eta = 0.5 / (L + 1)
-        m = theta0.size()[0]
-        theta = theta0
-        for k in range(steps):
-            w = torch.randn(size=(m, 1)).double()
-            theta = (1 - eta) * theta - eta * nablaF(theta) + eta * prox(theta) + np.sqrt(2 * eta) * w
-            if self.verbose == True:
-                print("Iter:", k, theta.T)
-        return prox(theta)
+    def sample(self, F, nablaF, HessianF, theta0, prox, steps=100):
+        L = self.calculate(HessianF, theta0)
+        eta = 0.5 / (L + 1)
+        m = theta0.size()[0]
+        theta = theta0
+        for k in range(steps):
+            w = torch.randn(size=(m, 1)).double()
+            theta = (
+                (1 - eta) * theta
+                - eta * nablaF(theta)
+                + eta * prox(theta)
+                + np.sqrt(2 * eta) * w
+            )
+            if self.verbose == True:
+                print("Iter:", k, theta.T)
+        return prox(theta)


 def MirrorLangevin(LangvinSampler):
-    pass
+    pass
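
Both definitions in this file use def where class appears intended: as written, ProximalLangevin and MirrorLangevin are ordinary functions, so sample is never attached to any subclass of LangevinSampler, and MirrorLangevin's base name drops an "e" (LangvinSampler). A sketch of the presumably intended declarations:

from stpy.approx_inference.langevin import LangevinSampler


class ProximalLangevin(LangevinSampler):
    def sample(self, F, nablaF, HessianF, theta0, prox, steps=100):
        ...  # body as in the diff above


class MirrorLangevin(LangevinSampler):
    pass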