Skip to content

Commit a117837

Browse files
adding sampling methods for Positive Semi-Definite matrices and sampling methods for normals given some linear projection
1 parent ac357a7 commit a117837

File tree

2 files changed

+111
-0
lines changed

2 files changed

+111
-0
lines changed
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
"""
2+
This function generates samples N from N( mu, Sigma ) such that N'*nu = x, i.e. it samples
N | N'*nu = x (which is still normal, by the way).
4+
5+
Note that mu drops out of the conditioning, so only the covariance is actually needed.
6+
7+
"""
8+
import numpy as np
9+
def sample_normal_given_projection( covariance, x, lin_proj, n_samples=1):
    """
    Sample from the conditional distribution N | lin_proj' N = x, where
    N ~ Normal(0, covariance).  The unconditional mean drops out of the
    conditioning, so only the covariance is needed.

    parameters:
        covariance: the covariance matrix of the unconditional samples (n, n)
        x: the value s.t. lin_proj' N = x; scalar
        lin_proj: the vector to project the sample onto (n,)
        n_samples: the number of samples to return

    returns:
        (n_samples, n) numpy array of conditional samples
        (NOTE: the original docstring said (n x n_samples); the code has
        always returned one sample per ROW, which is what is documented here.)
    """
    # Var( lin_proj' N ) = lin_proj' Sigma lin_proj
    variance = np.dot( np.dot( lin_proj.T, covariance), lin_proj )

    # Sigma @ lin_proj as a column vector; reused for both the conditional
    # mean and the conditional covariance.
    sigma_lin = np.dot(covariance, lin_proj[:,None])

    # Conditional mean:  Sigma nu * x / (nu' Sigma nu)
    cond_mu = ( sigma_lin.T*x/variance ).flatten()
    # Conditional covariance:  Sigma - Sigma nu nu' Sigma / (nu' Sigma nu).
    # This is rank-deficient by construction (the lin_proj direction is
    # pinned to x), so multivariate_normal may emit a "not PSD" warning.
    cond_covar = covariance - np.dot( sigma_lin, sigma_lin.T )/ variance

    _samples = np.random.multivariate_normal( cond_mu, cond_covar, size = (n_samples) )
    return ( _samples )

MonteCarlo/sample_psd.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
2+
3+
def generate_psd_matrix( dim, avg_covariance=0, diag=np.array([]) ):
    """
    Build a random dim x dim positive semi-definite (covariance) matrix by
    growing it one row/column at a time, using Sylvester's criterion: a
    candidate border (covariance row + variance) is accepted only while the
    leading principal minors stay positive.
    To make correlation matrices, pass diag as an array of all ones.

    parameters:
        dim: the dimension of the sampled covariance matrix
        avg_covariance: added to a Normal(0,1) observation for each
            covariance, so the sample mean of all covariances should be
            avg_covariance.
        diag: optional dim-dimensional vector to use as the diagonal
            elements; leave empty to draw random positive variances.

    returns:
        (dim, dim) symmetric PSD numpy array
    """
    invA = None
    M = np.zeros( (dim,dim) )
    # "was a diagonal supplied?" — diag.size is the intended test; the old
    # diag.any() silently ignored an explicitly supplied all-zero diag.
    use_diag = diag.size > 0
    for i in range( 0, dim ):   # xrange -> range: Python 3 compatibility
        A = M[:i,:i]

        b_flag = False
        while not b_flag:
            # propose a variance and a covariance row; randn() (not
            # randn(1)) so variance is a true scalar, not a (1,) array
            variance = diag[i] if use_diag else np.abs(avg_covariance + np.random.randn() )
            covariance = avg_covariance + np.random.randn(i)

            # Accept iff det > 0 of the bordered matrix | A   cov |
            #                                           | cov var |
            # which (A being PD) reduces to the Schur complement
            # var - cov' A^{-1} cov > 0.
            if i==0 or (variance - np.dot( np.dot( covariance.T, invA), covariance ) ) > 0:
                b_flag = True
                M[i, :i] = covariance
                M[:i, i] = covariance
                M[i,i] = variance

                if i > 0:
                    # grow the cached inverse incrementally rather than
                    # re-inverting the full leading block each iteration
                    invA = invert_block_matrix_CASE1( A , covariance, variance, invA)
                else:
                    invA = 1.0/M[i,i]

    return M
40+
41+
42+
43+
def invert_block_matrix_CASE1( A, b, c, invA = None):
    """
    Inverts the symmetric block matrix | A  b |
                                       | b' c |
    via the Schur complement k = c - b' A^{-1} b.

    parameters:
        A: (n, n) invertible matrix
        b: (n,) border vector
        c: scalar corner element
        invA: pass A^{-1} here if you already know it, to skip the
            np.linalg.inv call.  This is quicker for larger matrices.

    returns:
        (n+1, n+1) numpy array, the inverse of the block matrix
        (or 1.0/A in the degenerate n-by-0 border case).
    """
    n = A.shape[0]
    if n == 1 and A[0] != 0:
        invA = 1.0/A
    if b.shape[0] == 0:
        # empty border: the "block" matrix is just A itself
        return 1.0/A

    # BUG FIX: `invA == None` broadcasts elementwise when invA is an
    # ndarray (modern NumPy), making the truth test raise ValueError;
    # identity comparison is the correct None check.
    if invA is None:
        invA = np.linalg.inv(A)

    inverse = np.zeros( (n+1, n+1) )
    # Schur complement of A in the block matrix
    k = c - np.dot( np.dot( b, invA), b )

    # standard block-inverse formulas:
    #   top-left:  A^{-1} + A^{-1} b b' A^{-1} / k
    #   border:   -A^{-1} b / k        corner: 1/k
    inverse[ :n, :n] = invA + np.dot( np.dot( invA, b[:,None]), np.dot( b[:,None].T, invA) )/k
    inverse[n, n] = 1.0/k
    inverse[:n, n] = -np.dot(invA,b)/k
    inverse[n, :n] = -np.dot(invA,b)/k
    return inverse
71+
72+
73+

0 commit comments

Comments
 (0)