Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NeuroSMT solver #130

Draft
wants to merge 53 commits into
base: main
Choose a base branch
from
Draft
Changes from 1 commit
Commits
Show all changes
53 commits
Select commit Hold shift + click to select a range
3549fe8
implement stub for neurosmt
stephen-ostapenko Jul 5, 2023
dcce69b
set up gradle
stephen-ostapenko Jul 19, 2023
5f06f9d
SMT formula graph extractor
stephen-ostapenko Jul 19, 2023
4ae03cc
some fixes for extractor
stephen-ostapenko Jul 20, 2023
c5c7e71
another fix
stephen-ostapenko Jul 20, 2023
0236f09
fuck python
stephen-ostapenko Jul 21, 2023
13de179
yet another fix
stephen-ostapenko Jul 21, 2023
a6999eb
more fixes
stephen-ostapenko Jul 24, 2023
98dc958
aaaaaaaaaaaaa
stephen-ostapenko Jul 25, 2023
58791a4
train baseline
stephen-ostapenko Jul 26, 2023
fab60e8
train baseline
stephen-ostapenko Jul 26, 2023
deb0343
Merge remote-tracking branch 'origin/neurosmt-training' into neurosmt…
stephen-ostapenko Jul 26, 2023
829c45d
fix major bug
stephen-ostapenko Jul 27, 2023
9091a71
add roc-auc and more layers in decoder
stephen-ostapenko Jul 27, 2023
c5fdbc1
some refactoring
stephen-ostapenko Jul 27, 2023
dcb2c1e
sandboxing
stephen-ostapenko Jul 28, 2023
ce00214
add pytorch-lightning and tensorboard
stephen-ostapenko Aug 1, 2023
2ffa9b4
i forgot model
stephen-ostapenko Aug 1, 2023
3a689f8
adjust floating point precision
stephen-ostapenko Aug 1, 2023
09ef95a
add checkpointing
stephen-ostapenko Aug 1, 2023
6bfe3b6
refactor
stephen-ostapenko Aug 1, 2023
c787a29
fix backdoor
stephen-ostapenko Aug 1, 2023
ad9d65d
align sat/unsat sizes
stephen-ostapenko Aug 1, 2023
4ce4ad8
fix checkpointing
stephen-ostapenko Aug 1, 2023
ccf7177
add validation script
stephen-ostapenko Aug 1, 2023
02a959b
new logic of data loading
stephen-ostapenko Aug 2, 2023
0bdd180
adjust train/val scripts to new data loading process
stephen-ostapenko Aug 2, 2023
bc590f9
refactoring
stephen-ostapenko Aug 2, 2023
bbc3c5e
add ksmt binary files processing
stephen-ostapenko Aug 2, 2023
e0b644d
refactoring
stephen-ostapenko Aug 2, 2023
5546453
refactoring 2
stephen-ostapenko Aug 3, 2023
5c6bb72
train/test/val split with groups to avoid data leaks
stephen-ostapenko Aug 9, 2023
7893f94
commit for main split file
stephen-ostapenko Aug 10, 2023
40a483a
small refactor
stephen-ostapenko Aug 10, 2023
da66e8f
add confusion matrix for val and test
stephen-ostapenko Aug 10, 2023
24b933a
modify model input to save in onnx format
stephen-ostapenko Aug 11, 2023
7cb1af4
add kotlin runtime for model
stephen-ostapenko Aug 17, 2023
59f7839
fix bug in runtime and change conv type
stephen-ostapenko Aug 18, 2023
65d8bba
move python files to new module
stephen-ostapenko Aug 18, 2023
13b85fd
add node-wise features
stephen-ostapenko Aug 22, 2023
6b8a120
refactor
stephen-ostapenko Aug 22, 2023
9b777cb
update target type + additional metrics
stephen-ostapenko Aug 23, 2023
ff4dc62
refactor graph extractor and some other stuff
stephen-ostapenko Aug 23, 2023
8460715
add standalone model runner for hand benchmarking
stephen-ostapenko Aug 24, 2023
8b42598
add model export script
stephen-ostapenko Aug 24, 2023
5cbfbb8
add type hints, some comments and global constants in external file
stephen-ostapenko Aug 24, 2023
ce79d98
add model run time stats
stephen-ostapenko Aug 25, 2023
0cda4ac
add requirements.txt
stephen-ostapenko Aug 25, 2023
42ed4c5
add basic ksmt integration
stephen-ostapenko Aug 25, 2023
9320f96
add comments for kotlin code
stephen-ostapenko Aug 25, 2023
dd840ef
add model resource files
stephen-ostapenko Aug 25, 2023
63740f7
update code for usvm-formulas export
stephen-ostapenko Aug 25, 2023
9fd38c4
remove sandboxes
stephen-ostapenko Aug 31, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
i forgot model
  • Loading branch information
stephen-ostapenko committed Aug 1, 2023
commit 2ffa9b4e7f17d0069235b71bd640f3004b2d91eb
144 changes: 144 additions & 0 deletions ksmt-neurosmt/src/main/python/LightningModel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
from sklearn.metrics import classification_report

import torch
import torch.nn.functional as F

import pytorch_lightning as pl

from torchmetrics.classification import BinaryAccuracy, BinaryAUROC

from Model import Model


class LightningModel(pl.LightningModule):
    """PyTorch Lightning wrapper around the graph `Model` for binary classification.

    Trains with binary cross-entropy on logits, logs per-step/per-epoch loss and
    accuracy, and computes ROC-AUC plus a full classification report at the end
    of every validation epoch.
    """

    def __init__(self):
        super().__init__()

        self.model = Model()

        # Per-batch validation outputs/targets accumulated across the epoch so
        # that epoch-level metrics (ROC-AUC, classification report) can be
        # computed over the full validation set in `on_validation_epoch_end`.
        self.val_outputs = []
        self.val_targets = []

        self.acc = BinaryAccuracy()
        self.roc_auc = BinaryAUROC()

    def forward(self, x):
        """Delegate to the wrapped model.

        `x` is a batched graph object (presumably a torch_geometric `Batch`,
        given the `num_graphs` attribute used below — TODO confirm).
        """
        return self.model(x)

    def configure_optimizers(self):
        """Return an Adam optimizer over all trainable model parameters."""
        params = [p for p in self.model.parameters() if p is not None and p.requires_grad]
        return torch.optim.Adam(params, lr=1e-4)

    def training_step(self, train_batch, batch_idx):
        """Compute BCE-with-logits loss for one training batch and log metrics."""
        out = self.model(train_batch)
        loss = F.binary_cross_entropy_with_logits(out, train_batch.y)

        # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        # Probabilities are needed (not logits) for the accuracy metric.
        out = torch.sigmoid(out)

        self.log(
            "train/loss", loss.item(),
            prog_bar=True, logger=True,
            on_step=True, on_epoch=True,
            batch_size=train_batch.num_graphs
        )
        self.log(
            "train/acc", self.acc(out, train_batch.y),
            prog_bar=True, logger=True,
            on_step=False, on_epoch=True,
            batch_size=train_batch.num_graphs
        )

        return loss

    def validation_step(self, val_batch, batch_idx):
        """Compute validation loss, log metrics, and stash outputs for epoch end."""
        out = self.model(val_batch)
        loss = F.binary_cross_entropy_with_logits(out, val_batch.y)

        out = torch.sigmoid(out)

        self.log(
            "val/loss", loss.item(),
            prog_bar=True, logger=True,
            on_step=False, on_epoch=True,
            batch_size=val_batch.num_graphs
        )
        self.log(
            "val/acc", self.acc(out, val_batch.y),
            prog_bar=True, logger=True,
            on_step=False, on_epoch=True,
            batch_size=val_batch.num_graphs
        )

        # Keep probabilities and targets for epoch-level ROC-AUC / report.
        self.val_outputs.append(out)
        self.val_targets.append(val_batch.y)

        return loss

    def on_validation_epoch_end(self):
        """Compute epoch-level ROC-AUC and print a classification report."""
        print("\n\n", flush=True)

        all_outputs = torch.flatten(torch.cat(self.val_outputs))
        all_targets = torch.flatten(torch.cat(self.val_targets))

        # Reset accumulators so the next epoch starts clean.
        self.val_outputs.clear()
        self.val_targets.clear()

        logger = self.logger.experiment

        roc_auc = self.roc_auc(all_outputs, all_targets)
        # Logged twice: `self.log` for the progress bar (logger=False to avoid
        # duplication), and directly to the underlying experiment (TensorBoard).
        self.log(
            "val/roc-auc", roc_auc,
            prog_bar=True, logger=False,
            on_step=False, on_epoch=True
        )
        logger.add_scalar("val/roc-auc", roc_auc, self.current_epoch)

        all_outputs = all_outputs.cpu().numpy()
        all_targets = all_targets.cpu().numpy()

        # Threshold probabilities at 0.5 to get hard predictions for the report.
        all_outputs = all_outputs > 0.5
        print(classification_report(all_targets, all_outputs, digits=3, zero_division=0.0))

        print("\n", flush=True)