# example_1.py
# Example 1: Unit Circle and Half-Sphere Integrands Comparison
#
# This example demonstrates how different Monte Carlo integration methods perform
# with two different integrand functions:
# 1. Unit Circle: Integrating the indicator function of a unit circle (area = π)
# 2. Half-Sphere: Integrating a function representing a half-sphere (volume = 2π/3)
#
# The example compares:
# - Plain Monte Carlo integration
# - Markov Chain Monte Carlo (MCMC)
# - VEGAS algorithm (adaptive importance sampling)
# - VEGAS with MCMC
#
# Both integrands are defined over the square [-1,1]×[-1,1]
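
# For orientation, the analytic values the printed estimates should approach,
# recorded as reference constants (a small addition for this example; they are
# not used by the integrators themselves):
import math

UNIT_CIRCLE_AREA = math.pi            # circle indicator integrated over [-1,1]^2 -> pi
HALF_SPHERE_VOLUME = 2 * math.pi / 3  # sqrt(1 - x^2 - y^2) integrated over the unit disk -> 2*pi/3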
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import os
import sys
import traceback
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
os.environ["NCCL_DEBUG"] = "OFF"
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
os.environ["GLOG_minloglevel"] = "2"
os.environ["MASTER_ADDR"] = os.getenv("MASTER_ADDR", "localhost")
os.environ["MASTER_PORT"] = os.getenv("MASTER_PORT", "12355")
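# NCCL is the GPU backend for torch.distributed and requires CUDA devices;
# on a CPU-only machine, "gloo" would be the usual substitute.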
backend = "nccl"
def init_process(rank, world_size, fn, backend=backend):
    # Join the distributed process group, then run the worker function.
    try:
        dist.init_process_group(backend, rank=rank, world_size=world_size)
        fn(rank, world_size)
    except Exception:
        traceback.print_exc()
        if dist.is_initialized():
            dist.destroy_process_group()
        raise
def run_mcmc(rank, world_size):
    try:
        # Only rank 0 prints; the other workers are silenced.
        if rank != 0:
            sys.stderr = open(os.devnull, "w")
            sys.stdout = open(os.devnull, "w")

        # Give each rank its own random seed.
        torch.manual_seed(42 + rank)

        def unit_circle_integrand(x, f):
            # Indicator of the unit circle; its integral over [-1,1]^2 is pi.
            f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
            return f[:, 0]
        def half_sphere_integrand(x, f):
            # Height of the half-sphere z = sqrt(1 - x^2 - y^2); its integral
            # over [-1,1]^2 is the half-sphere volume 2*pi/3.
            f[:, 0] = torch.sqrt(torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0))
            return f[:, 0]
        dim = 2
        bounds = [(-1, 1), (-1, 1)]
        n_eval = 6400000
        batch_size = 40000
        n_therm = 20  # number of burn-in (thermalization) sweeps for MCMC

        # Each rank drives its own GPU; the NCCL backend assumes CUDA is available.
        device = torch.device(f"cuda:{rank}")

        vegas_map = Vegas(dim, device=device, ninc=10)
        # Monte Carlo and MCMC for Unit Circle
        mc_integrator = MonteCarlo(
            f=unit_circle_integrand,
            bounds=bounds,
            batch_size=batch_size,
            device=device,
        )
        mcmc_integrator = MarkovChainMonteCarlo(
            f=unit_circle_integrand,
            bounds=bounds,
            batch_size=batch_size,
            nburnin=n_therm,
            device=device,
        )

        print("Unit Circle Integration Results:")
        print("Plain MC:", mc_integrator(n_eval))
        print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))

        # Train VEGAS map for Unit Circle
        vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)

        vegas_integrator = MonteCarlo(
            bounds,
            f=unit_circle_integrand,
            maps=vegas_map,
            batch_size=batch_size,
            device=device,
        )
        vegasmcmc_integrator = MarkovChainMonteCarlo(
            bounds,
            f=unit_circle_integrand,
            maps=vegas_map,
            batch_size=batch_size,
            nburnin=n_therm,
            device=device,
        )

        print("VEGAS:", vegas_integrator(n_eval))
        print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
        # Monte Carlo and MCMC for Half-Sphere: reuse the integrators with a new integrand.
        mc_integrator.f = half_sphere_integrand
        mcmc_integrator.f = half_sphere_integrand

        print("\nHalf-Sphere Integration Results:")
        print("Plain MC:", mc_integrator(n_eval))
        print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))

        # Reset the VEGAS grid and retrain it for the half-sphere integrand.
        vegas_map.make_uniform()
        vegas_map.adaptive_training(batch_size, half_sphere_integrand, epoch=10, alpha=0.5)

        vegas_integrator.f = half_sphere_integrand
        vegasmcmc_integrator.f = half_sphere_integrand

        print("VEGAS:", vegas_integrator(n_eval))
        print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
    except Exception as e:
        print(f"Error in run_mcmc for rank {rank}: {e}")
        traceback.print_exc()
        raise
    finally:
        # Always tear down the process group, even on failure.
        if dist.is_initialized():
            dist.destroy_process_group()
def test_mcmc(world_size):
    # Each rank binds to cuda:<rank>, so cap world_size at the number of visible GPUs.
    world_size = min(world_size, torch.cuda.device_count())
    try:
        mp.spawn(
            init_process,
            args=(world_size, run_mcmc),
            nprocs=world_size,
            join=True,
            daemon=False,
        )
    except Exception as e:
        print(f"Error in test_mcmc: {e}")
if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    test_mcmc(4)
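
# Typical invocation (assuming a machine with CUDA GPUs, since the script uses
# the NCCL backend and binds rank r to cuda:r):
#
#   python example_1.py
#
# Each unit-circle estimate should approach pi (~3.14159) and each half-sphere
# estimate should approach 2*pi/3 (~2.09440).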