Point Cloud Optimization

import multipers as mp
from multipers.data import noisy_annulus, three_annulus
import multipers.ml.point_clouds as mmp
import multipers.ml.signed_measures as mms
import gudhi as gd
import numpy as np
import matplotlib.pyplot as plt
import torch as t
import torch
# t.autograd.set_detect_anomaly(True)
import multipers.torch.rips_density as mpt
from multipers.plots import plot_signed_measures, plot_signed_measure
from tqdm import tqdm
torch.manual_seed(1)
np.random.seed(1)
[KeOps] Warning : Cuda libraries were not detected on the system or could not be loaded ; using cpu only mode

Spatially localized optimization

The goal of this notebook is to generate cycles on the modes of a fixed measure.

In this example, the measure is defined (in the cell below) as a sum of three Gaussian measures.

## The codensity function of the measure (low at the three modes)
def custom_map(x, sigma=.17, threshold=None):  # threshold is unused; accepted so later calls passing it don't fail
    if x.ndim == 1:
        x = x[None,:]
    assert x.ndim == 2
    # the three modes of the measure
    basepoints = t.tensor([[0.2,0.2], [0.8, 0.4], [0.4, 0.7]]).T
    # out = minus the sum of the three (unnormalized) Gaussian bumps
    out = -(t.exp(-(((x[:,:,None] - basepoints[None,:,:]) / sigma).square()).sum(dim=1))).sum(dim=-1)
    return 1 + out  # 0.8 for norm 1
x = np.linspace(0, 1, 100)
mesh = np.meshgrid(x, x)
coordinates = np.concatenate([m.flatten()[:, None] for m in mesh], axis=1)
coordinates = torch.from_numpy(coordinates)
plt.scatter(*coordinates.T, c=custom_map(coordinates), cmap="viridis_r")
plt.colorbar()
<matplotlib.colorbar.Colorbar at 0x7fd9f6951160>
[Figure: the codensity custom_map plotted over the unit square, with colorbar.]

We start from a uniformly sampled point cloud, which we will then optimize.

x = np.random.uniform(size=(500,2))
x = t.tensor(x, requires_grad=True)
plt.scatter(*x.detach().numpy().T, c=custom_map(x).detach().numpy(), cmap="viridis_r")
plt.colorbar()
<matplotlib.colorbar.Colorbar at 0x7fd9f5f6ecf0>
[Figure: the initial uniform point cloud, colored by codensity.]

The function_rips_signed_measure function (from multipers.torch.rips_density, imported as mpt above) computes signed measures from Rips + function bifiltrations in a torch-differentiable manner.

x = np.random.uniform(size=(300,2))
x = t.tensor(x, requires_grad=True)
sm_diff, = mpt.function_rips_signed_measure(
    x, function=custom_map, degree=1, num_collapses=-1, plot=True, verbose=True, complex="rips");
Input is a slicer.
Reduced slicer. Retrieving measure from it...Done.
Cleaning measure...Done.
Pushing back the measure to the grid...Done.
[Figure: the degree-1 signed measure of the Rips + codensity bifiltration.]

For this example we use the following loss. Given a signed measure \(\mu\), define

\[\mathrm{loss}(\mu) := \int\varphi(x) d\mu(x)\]

where \(x := (r,d)\in \mathbb R^2\) (\(r\) for the radius and \(d\) for the codensity value of the original measure) and \(\varphi(x) = \varphi(r,d) = r\times(\mathrm{threshold}-d)\).

This can be interpreted as follows:

  • we maximize the radius of the negative points (maximizing the radius of the cycles);

  • we minimize the radius of the positive points (the edges connecting the points that create the cycles); this creates nicer-looking cycles;

  • we care more about cycles that are close to a mode (the \(\mathrm{threshold}-d\) factor). The threshold makes cycles that are not close enough to a mode progressively stop forming loops.

threshold = .65
def softplus(x): # defined but not used below
    return torch.log(1+torch.exp(x))
# @torch.compile(dynamic=True)
def loss_function(x, sm):
    pts, weights = sm
    radius, density = pts.T
    # phi(r, d) = r * (threshold - d), summed over the selected points
    phi = lambda r, d: (r * (threshold - d)).sum()
    # minimizing this loss pushes phi down on positive points and up on negative points
    loss = phi(radius[weights>0], density[weights>0]) - phi(radius[weights<0], density[weights<0])
    return loss

loss_function(x, sm_diff) # sanity check: this should run without error and carry a gradient
tensor(0.2417, grad_fn=<SubBackward0>)
xinit = np.random.uniform(size=(500,2)) # initial dataset
x = t.tensor(xinit, requires_grad=True)
adam = t.optim.Adam([x], lr=0.01) # optimizer
losses = []
plt.scatter(*x.detach().numpy().T, c=custom_map(x, threshold=np.inf).detach().numpy(), cmap="viridis_r")
plt.show()
for i in range(101): # gradient steps
    # change backend to "multipers" if you don't have mpfree
    sm_diff, = mpt.function_rips_signed_measure(x, function=custom_map, degree=1, complex="weak_delaunay"); # delaunay can be changed to rips
    adam.zero_grad()
    loss = loss_function(x, sm_diff)
    loss.backward()
    adam.step()
    losses.append(loss.detach().numpy())
    with torch.no_grad():
        if i % 10 == 1: # plot every 10 gradient steps
            base = 4
            ncols = 3
            fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols*base, base))
            ax1.scatter(*x.detach().numpy().T, c=custom_map(x, threshold=np.inf).detach().numpy(), cmap="viridis_r")
            plot_signed_measure(sm_diff, ax=ax2)
            ax3.plot(losses, label="loss")
            plt.show()
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols*base, base))
ax1.scatter(*xinit.T, c=custom_map(t.tensor(xinit)).detach().numpy(),cmap="viridis_r")
ax2.scatter(*x.detach().numpy().T, c=custom_map(x).detach().numpy(),cmap="viridis_r")
ax3.plot(losses)

plt.show()
[Figures: point cloud, degree-1 signed measure, and loss curve, plotted every 10 gradient steps, followed by a comparison of the initial and optimized point clouds.]

We now observe an onion-like structure around the modes of the background measure.

How can we interpret this? The density constraint and the radius constraint in the loss fight against each other, so each cycle has to settle at a local optimum, which leads to these onion layers.
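One heuristic way to make this balance explicit (an informal sketch, treating the codensity \(d\) of a cycle as an increasing function \(d(r)\) of its radius \(r\)): through its negative point, a cycle contributes \(-r\,(\mathrm{threshold}-d(r))\) to the loss, so gradient descent stops enlarging it when

\[\frac{\partial}{\partial r}\Big[r\,\big(\mathrm{threshold}-d(r)\big)\Big] \;=\; \big(\mathrm{threshold}-d(r)\big) - r\,d'(r) \;=\; 0,\]

that is, when the marginal gain in radius is exactly offset by the marginal increase in codensity. Cycles settle at different radii around each mode, which produces the layers.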

Density preserving optimization

This example is taken from the paper Differentiability and Optimization of Multiparameter Persistent Homology, and extends the point cloud experiment of the Gudhi optimization notebook.
One can check from the Gudhi notebook that a compactness regularization term is necessary in the one-parameter persistence setting; this issue does not arise when optimizing a Rips-density bifiltration, as cycles naturally balance between scale and density, and hence do not diverge.

from multipers.ml.convolutions import KDE
X = np.block([
    [np.random.uniform(low=-0.1, high=.2, size=(100,2))],
    [mp.data.noisy_annulus(300, 0, 0.85, 1)]
])
bandwidth = .1
# codensity: minus the log kernel density estimate of the point cloud
custom_map2 = lambda X: -KDE(bandwidth=bandwidth, return_log=True).fit(X).score_samples(X)
codensity = custom_map2(X)
plt.scatter(*X.T, c=-codensity)
plt.gca().set_aspect(1)
[Figure: the initial point cloud (noisy annulus plus uniform noise), colored by density.]
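In formulas, the loss used in the next cell (norm_loss, with norm=1) is

\[\mathrm{loss} := \frac{1}{n}\Big(\sum_{x \,:\, w(x)>0}\|x\|_1 \;-\; \sum_{x \,:\, w(x)<0}\|x\|_1\Big),\]

where the sums run over the \(n\) points \(x = (r,d)\) of the signed measure and \(w(x)\) denotes their weights. Minimizing it shrinks the 1-norm of the positive points and increases that of the negative points.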
def norm_loss(sm_diff,norm=1.):
    pts,weights = sm_diff
    loss = (torch.norm(pts[weights>0], p=norm, dim=1)).sum() -  (torch.norm(pts[weights<0], p=norm, dim=1)).sum()
    return loss / pts.shape[0]
x = torch.from_numpy(X).clone().requires_grad_(True)
opt = torch.optim.Adam([x], lr=.01)
losses = []
for i in range(100):
    opt.zero_grad()
    sm_diff, = mpt.function_rips_signed_measure(x=x, theta=bandwidth, degrees=[1], kernel="gaussian", complex="weak_delaunay")
    loss = norm_loss(sm_diff)
    loss.backward()
    losses.append(loss.detach().item()) # store a plain float, not a tensor with gradients
    opt.step()
    if i % 10 == 0: # plot every 10 gradient steps
        with torch.no_grad():
            base = 4
            ncols = 3
            fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols*base, base))
            ax1.scatter(*x.detach().numpy().T, c=custom_map2(x).detach().numpy(), cmap="viridis_r")
            plot_signed_measure(sm_diff, ax=ax2)
            ax3.plot(losses, label="loss")
            plt.show()

with torch.no_grad():
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=ncols, figsize=(ncols*base, base))
    ax1.scatter(*X.T, c=custom_map2(t.tensor(X)).detach().numpy(),cmap="viridis_r")
    ax2.scatter(*x.detach().numpy().T, c=custom_map2(x).detach().numpy(),cmap="viridis_r")
    ax3.plot(losses)
    plt.show()
[Figures: point cloud, degree-1 signed measure, and loss curve, plotted every 10 gradient steps, followed by a comparison of the initial and optimized point clouds.]