Commit

Meshes

tvayer committed Oct 16, 2019
1 parent 9805085 commit 0a4fcab
Showing 7 changed files with 328 additions and 7 deletions.
6 changes: 6 additions & 0 deletions .ipynb_checkpoints/SGW example-checkpoint.ipynb
@@ -0,0 +1,6 @@
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 1
}
229 changes: 229 additions & 0 deletions SGW example.ipynb

Large diffs are not rendered by default.

Binary file added data/meshes.npy
Binary file not shown.
Binary file added lib/__pycache__/sgw_numpy.cpython-35.pyc
Binary file not shown.
Binary file added lib/__pycache__/sgw_pytorch.cpython-35.pyc
Binary file not shown.
69 changes: 65 additions & 4 deletions lib/sgw_numpy.py
@@ -24,7 +24,7 @@ def sgw_cpu(xs,xt,nproj=200,tolog=False,P=None):
nproj : integer
Number of projections. Ignored if P is not None
P : numpy array, shape (max(p,q),n_proj)
- Projection matrix
+ Projection matrix. If None, a new projection matrix is created
tolog : bool
Whether to return timings or not
Returns
@@ -39,7 +39,6 @@ def sgw_cpu(xs,xt,nproj=200,tolog=False,P=None):
Example
----------
import numpy as np
- import torch
from sgw_numpy import sgw_cpu
n_samples=300
@@ -74,6 +73,25 @@ def sgw_cpu(xs,xt,nproj=200,tolog=False,P=None):
return d

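For context, here is a minimal end-to-end sketch of calling sgw_cpu, completing the truncated docstring example above (the uniform random clouds and the 200-column P are assumptions, not part of the original example):

import numpy as np
from sgw_numpy import sgw_cpu

n_samples = 300
Xs = np.random.rand(n_samples, 2)  # source samples in R^2
Xt = np.random.rand(n_samples, 1)  # target samples in R^1
P = np.random.randn(2, 200)        # one projection direction per column, shape (max(p,q), n_proj)
d = sgw_cpu(Xs, Xt, P=P)           # scalar SGW value between the two clouds
print(d)
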
def _cost(xsp,xtp,tolog=False):
""" Returns the GM cost eq (3) in [1]
Parameters
----------
xsp : tensor, shape (n, n_proj)
1D sorted samples (after finding the optimal sigma) for each projection in the source
xtp : tensor, shape (n, n_proj)
1D sorted samples (after finding the optimal sigma) for each projection in the target
tolog : bool
Whether to return timings or not
Returns
-------
C : tensor, shape (n_proj,1)
Cost for each projection
References
----------
.. [1] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
and Courty Nicolas
"Sliced Gromov-Wasserstein"
"""
st=time.time()
allC=[]
for j in range(xsp.shape[1]):
@@ -117,8 +135,26 @@ def _cost(xsp,xtp,tolog=False):
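To make the eq. (3) cost concrete, here is a naive O(n^2) reading of the per-projection cost (gm_cost_naive is a hypothetical helper for intuition only; the library's _cost may normalize differently and is computed faster in practice):

import numpy as np

def gm_cost_naive(xs_1d, xt_1d):
    # Compare all pairwise squared differences of the two 1D samples:
    # the GM discrepancy accumulates (|x_i - x_j|^2 - |y_i - y_j|^2)^2.
    Cs = (xs_1d[:, None] - xs_1d[None, :]) ** 2
    Ct = (xt_1d[:, None] - xt_1d[None, :]) ** 2
    return np.sum((Cs - Ct) ** 2)
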


def gromov_1d(xs,xt,tolog=False,fast=True):


""" Solves the Gromov in 1D (eq (2) in [1] for each proj
Parameters
----------
xs : tensor, shape (n, n_proj)
1D sorted samples for each projection in the source
xt : tensor, shape (n, n_proj)
1D sorted samples for each projection in the target
tolog : bool
Whether to return timings or not
fast : bool
Whether to use the O(n log(n)) cost or not
Returns
-------
toreturn : tensor, shape (n_proj,1)
The SGW cost for each projection
References
----------
.. [1] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
and Courty Nicolas
"Sliced Gromov-Wasserstein"
"""
if tolog:
log={}

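As a rough per-projection illustration of the 1D solver (a sketch reusing the hypothetical gm_cost_naive above; the fast=True path in the library computes the same quantity in O(n log n) instead):

import numpy as np

def gromov_1d_naive(xs_1d, xt_1d):
    # In 1D the optimal coupling matches the sorted samples either in
    # the same order or in the reversed order, so it suffices to try both.
    xs_sorted = np.sort(xs_1d)
    xt_sorted = np.sort(xt_1d)
    return min(gm_cost_naive(xs_sorted, xt_sorted),
               gm_cost_naive(xs_sorted, xt_sorted[::-1]))
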
@@ -152,6 +188,31 @@ def gromov_1d(xs,xt,tolog=False,fast=True):


def sink_(xs,xt,nproj=200,P=None):
""" Sinks the points of the measure in the lowest dimension onto the highest dimension and applies the projections.
Only implemented with the 0 padding Delta=Delta_pad operator (see [1])
Parameters
----------
xs : tensor, shape (n, p)
Source samples
xt : tensor, shape (n, q)
Target samples
nproj : integer
Number of projections. Ignored if P is not None
P : tensor, shape (max(p,q),n_proj)
Projection matrix
Returns
-------
xsp : tensor, shape (n,n_proj)
Projected source samples
xtp : tensor, shape (n,n_proj)
Projected target samples
References
----------
.. [1] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
and Courty Nicolas
"Sliced Gromov-Wasserstein"
"""
dim_d= xs.shape[1]
dim_p= xt.shape[1]

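A sketch of the Delta_pad idea for intuition (sink_pad_naive is hypothetical; whether the library also normalizes the projection directions is not shown in this hunk):

import numpy as np

def sink_pad_naive(xs, xt, P):
    # Zero-pad the lower-dimensional cloud so both clouds live in
    # max(p, q) dimensions, then project every point on the columns of P.
    n, p = xs.shape
    m, q = xt.shape
    d = max(p, q)
    xs_pad = np.hstack([xs, np.zeros((n, d - p))])
    xt_pad = np.hstack([xt, np.zeros((m, d - q))])
    return xs_pad @ P, xt_pad @ P  # shapes (n, n_proj) and (m, n_proj)
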
31 changes: 28 additions & 3 deletions lib/sgw_pytorch.py
@@ -13,7 +13,7 @@ class BadShapeError(Exception):
pass

def sgw_gpu(xs,xt,device,nproj=200,tolog=False,P=None):
""" Returns SGW between xs and xt eq (4) in [1]. Only implemented with the 0 padding Delta
""" Returns SGW between xs and xt eq (4) in [1]. Only implemented with the 0 padding operator Delta
Parameters
----------
xs : tensor, shape (n, p)
@@ -24,7 +24,7 @@ def sgw_gpu(xs,xt,device,nproj=200,tolog=False,P=None):
nproj : integer
Number of projections. Ignored if P is not None
P : tensor, shape (max(p,q),n_proj)
- Projection matrix
+ Projection matrix. If None, a new projection matrix is created
tolog : bool
Whether to return timings or not
Returns
@@ -49,7 +49,7 @@ def sgw_gpu(xs,xt,device,nproj=200,tolog=False,P=None):
xt=torch.from_numpy(Xt).to(torch.float32)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
P=np.random.randn(2,500)
- sgw(xs,xt,device,P=torch.from_numpy(P).to(torch.float32),fast=True)
+ sgw_gpu(xs,xt,device,P=torch.from_numpy(P).to(torch.float32))
"""
if tolog:
log={}
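
A self-contained variant of the docstring example above, runnable as a script (a sketch; it assumes, like the original example, that sgw_gpu handles placing the inputs on `device`):

import numpy as np
import torch
from sgw_pytorch import sgw_gpu

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
xs = torch.from_numpy(np.random.rand(300, 2)).to(torch.float32)  # source cloud
xt = torch.from_numpy(np.random.rand(300, 1)).to(torch.float32)  # target cloud
P = torch.from_numpy(np.random.randn(2, 500)).to(torch.float32)  # 500 projections
print(sgw_gpu(xs, xt, device, P=P))
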
@@ -194,6 +194,31 @@ def gromov_1d(xs,xt,tolog=False):
return toreturn

def sink_(xs,xt,device,nproj=200,P=None): #Delta operator (here just padding)
""" Sinks the points of the measure in the lowest dimension onto the highest dimension and applies the projections.
Only implemented with the 0 padding Delta=Delta_pad operator (see [1])
Parameters
----------
xs : tensor, shape (n, p)
Source samples
xt : tensor, shape (n, q)
Target samples
device : torch device
nproj : integer
Number of projections. Ignored if P is not None
P : tensor, shape (max(p,q),n_proj)
Projection matrix
Returns
-------
xsp : tensor, shape (n,n_proj)
Projected source samples
xtp : tensor, shape (n,n_proj)
Projected target samples
References
----------
.. [1] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
and Courty Nicolas
"Sliced Gromov-Wasserstein"
"""
dim_d= xs.shape[1]
dim_p= xt.shape[1]

