diff --git a/GraphHD_v2/basic/result1699333218.335392.csv b/GraphHD_v2/basic/result1699333218.335392.csv new file mode 100644 index 00000000..f6f1c6a5 --- /dev/null +++ b/GraphHD_v2/basic/result1699333218.335392.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.39,0.16,53.33,53.33,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.23,0.12,62.86,62.86,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.52,0.26,63.81,63.81,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.29,0.15,54.29,54.29,FHRR diff --git a/GraphHD_v2/basic/result1699333236.2733052.csv b/GraphHD_v2/basic/result1699333236.2733052.csv new file mode 100644 index 00000000..836aa40c --- /dev/null +++ b/GraphHD_v2/basic/result1699333236.2733052.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.36,0.17,64.76,64.76,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.21,0.12,63.81,63.81,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.51,0.23,63.81,63.81,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.3,0.14,63.81,63.81,FHRR diff --git a/GraphHD_v2/basic/result1699399798.518559.csv b/GraphHD_v2/basic/result1699399798.518559.csv new file mode 100644 index 00000000..ec36a20a --- /dev/null +++ b/GraphHD_v2/basic/result1699399798.518559.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.51,0.2,59.05,59.05,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.24,0.11,36.84,36.84,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,7.05,3.48,50.53,50.53,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.06,0.59,12.22,12.22,BSC diff --git a/GraphHD_v2/basic/result1699399947.0550241.csv b/GraphHD_v2/basic/result1699399947.0550241.csv new file mode 100644 index 00000000..db8ac6f3 --- /dev/null +++ b/GraphHD_v2/basic/result1699399947.0550241.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.44,0.21,56.19,56.19,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.24,0.15,33.33,33.33,BSC diff --git a/GraphHD_v2/basic/result1699399982.062447.csv b/GraphHD_v2/basic/result1699399982.062447.csv new file mode 100644 index 00000000..50594f70 --- /dev/null +++ b/GraphHD_v2/basic/result1699399982.062447.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.45,0.2,43.81,43.81,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.25,0.13,31.58,31.58,BSC diff --git a/GraphHD_v2/basic/result1699400007.153137.csv b/GraphHD_v2/basic/result1699400007.153137.csv new file mode 100644 index 00000000..9ab57913 --- /dev/null +++ b/GraphHD_v2/basic/result1699400007.153137.csv @@ -0,0 +1,48 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.43,0.19,37.14,37.14,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.23,0.12,33.33,33.33,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,8.28,3.28,50.45,50.45,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.15,0.52,17.78,17.78,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.61,1.05,59.58,59.58,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,16.52,7.5,54.24,54.24,BSC 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.29,0.17,55.24,55.24,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.18,0.09,82.46,82.46,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,4.58,2.22,65.45,65.45,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.75,0.35,36.67,36.67,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,1.57,0.72,70.36,70.36,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,10.86,4.55,71.47,71.47,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.99,0.41,53.33,53.33,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.63,0.29,82.46,82.46,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,19.84,8.55,60.91,60.91,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,4.0,1.64,36.11,36.11,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,8.07,4.23,68.56,68.56,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,77.69,28.39,76.55,76.55,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.35,0.22,55.24,55.24,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.23,0.12,85.96,85.96,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,7.75,3.84,61.8,61.8,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.53,0.64,36.67,36.67,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.71,1.2,67.66,67.66,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,22.84,8.59,72.88,72.88,FHRR diff --git a/GraphHD_v2/basic/result1699478414.976448.csv b/GraphHD_v2/basic/result1699478414.976448.csv new file mode 100644 index 00000000..820e686c --- /dev/null +++ b/GraphHD_v2/basic/result1699478414.976448.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.0,0.0,0.0,0.0,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.0,0.0,0.0,0.0,BSC diff --git a/GraphHD_v2/basic/result1699478437.8803468.csv b/GraphHD_v2/basic/result1699478437.8803468.csv new file mode 100644 index 00000000..89c61ed9 --- /dev/null +++ b/GraphHD_v2/basic/result1699478437.8803468.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.0,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478446.3933382.csv b/GraphHD_v2/basic/result1699478446.3933382.csv new file mode 100644 index 00000000..4724de82 --- /dev/null +++ b/GraphHD_v2/basic/result1699478446.3933382.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.01,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478536.29761.csv b/GraphHD_v2/basic/result1699478536.29761.csv new file mode 100644 index 00000000..41d8e6a1 --- /dev/null +++ b/GraphHD_v2/basic/result1699478536.29761.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.09,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478578.291143.csv b/GraphHD_v2/basic/result1699478578.291143.csv new file mode 100644 index 00000000..4bca11be --- /dev/null +++ b/GraphHD_v2/basic/result1699478578.291143.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.08,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478605.5848489.csv 
b/GraphHD_v2/basic/result1699478605.5848489.csv new file mode 100644 index 00000000..4bca11be --- /dev/null +++ b/GraphHD_v2/basic/result1699478605.5848489.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.08,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478647.246716.csv b/GraphHD_v2/basic/result1699478647.246716.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699478647.246716.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478678.555866.csv b/GraphHD_v2/basic/result1699478678.555866.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699478678.555866.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478699.115463.csv b/GraphHD_v2/basic/result1699478699.115463.csv new file mode 100644 index 00000000..4724de82 --- /dev/null +++ b/GraphHD_v2/basic/result1699478699.115463.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.01,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699478901.1126761.csv b/GraphHD_v2/basic/result1699478901.1126761.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699478901.1126761.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699479017.988084.csv b/GraphHD_v2/basic/result1699479017.988084.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699479017.988084.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699479038.843523.csv b/GraphHD_v2/basic/result1699479038.843523.csv new file mode 100644 index 00000000..4724de82 --- /dev/null +++ b/GraphHD_v2/basic/result1699479038.843523.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.01,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699479721.872013.csv b/GraphHD_v2/basic/result1699479721.872013.csv new file mode 100644 index 00000000..3072e71f --- /dev/null +++ b/GraphHD_v2/basic/result1699479721.872013.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.06,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699481692.648533.csv b/GraphHD_v2/basic/result1699481692.648533.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699481692.648533.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699481739.722862.csv b/GraphHD_v2/basic/result1699481739.722862.csv new file mode 100644 index 00000000..5d8d1e87 --- /dev/null +++ b/GraphHD_v2/basic/result1699481739.722862.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.04,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699481968.7003891.csv b/GraphHD_v2/basic/result1699481968.7003891.csv new file mode 100644 index 00000000..5037e684 --- /dev/null +++ b/GraphHD_v2/basic/result1699481968.7003891.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.03,0.0,0.0,0.0,MAP diff --git 
a/GraphHD_v2/basic/result1699481978.6428401.csv b/GraphHD_v2/basic/result1699481978.6428401.csv new file mode 100644 index 00000000..8b38d87b --- /dev/null +++ b/GraphHD_v2/basic/result1699481978.6428401.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.02,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699482001.435821.csv b/GraphHD_v2/basic/result1699482001.435821.csv new file mode 100644 index 00000000..9f4bd7ac --- /dev/null +++ b/GraphHD_v2/basic/result1699482001.435821.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.05,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699482233.271428.csv b/GraphHD_v2/basic/result1699482233.271428.csv new file mode 100644 index 00000000..5d8d1e87 --- /dev/null +++ b/GraphHD_v2/basic/result1699482233.271428.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.04,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699483016.824408.csv b/GraphHD_v2/basic/result1699483016.824408.csv new file mode 100644 index 00000000..3072e71f --- /dev/null +++ b/GraphHD_v2/basic/result1699483016.824408.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.06,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699483096.264558.csv b/GraphHD_v2/basic/result1699483096.264558.csv new file mode 100644 index 00000000..26395aa2 --- /dev/null +++ b/GraphHD_v2/basic/result1699483096.264558.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.13,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699483303.0820029.csv b/GraphHD_v2/basic/result1699483303.0820029.csv new file mode 100644 index 00000000..6f677e14 --- /dev/null +++ b/GraphHD_v2/basic/result1699483303.0820029.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.14,0.0,0.0,0.0,MAP diff --git a/GraphHD_v2/basic/result1699484311.550752.csv b/GraphHD_v2/basic/result1699484311.550752.csv new file mode 100644 index 00000000..36700fc7 --- /dev/null +++ b/GraphHD_v2/basic/result1699484311.550752.csv @@ -0,0 +1,10 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.17,62.86,62.86,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.17,62.86,62.86,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.17,62.86,62.86,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.17,62.86,62.86,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.17,62.86,62.86,FHRR,closeness_centrality diff --git a/GraphHD_v2/basic/result1699484444.565254.csv b/GraphHD_v2/basic/result1699484444.565254.csv new file mode 100644 index 00000000..3ad6944c --- /dev/null +++ b/GraphHD_v2/basic/result1699484444.565254.csv @@ -0,0 +1,10 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.22,60.95,60.95,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.22,60.95,60.95,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.22,60.95,60.95,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.22,60.95,60.95,FHRR,katz_centrality 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.35,0.22,60.95,60.95,FHRR,closeness_centrality diff --git a/GraphHD_v2/basic/result1699484455.0859709.csv b/GraphHD_v2/basic/result1699484455.0859709.csv new file mode 100644 index 00000000..88c2d45c --- /dev/null +++ b/GraphHD_v2/basic/result1699484455.0859709.csv @@ -0,0 +1,32 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.17,57.14,57.14,FHRR,second_order_centrality diff --git a/GraphHD_v2/basic/result1699484542.139751.csv b/GraphHD_v2/basic/result1699484542.139751.csv new file mode 100644 index 00000000..76e9385b --- /dev/null +++ b/GraphHD_v2/basic/result1699484542.139751.csv @@ -0,0 +1,32 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,information_centrality 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.15,52.38,52.38,FHRR,second_order_centrality diff --git a/GraphHD_v2/basic/result1699484578.5374851.csv b/GraphHD_v2/basic/result1699484578.5374851.csv new file mode 100644 index 00000000..b6f81a26 --- /dev/null +++ b/GraphHD_v2/basic/result1699484578.5374851.csv @@ -0,0 +1,32 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,second_order_centrality diff --git a/GraphHD_v2/basic/result1699484608.5689301.csv b/GraphHD_v2/basic/result1699484608.5689301.csv 
new file mode 100644 index 00000000..19bda0b9 --- /dev/null +++ b/GraphHD_v2/basic/result1699484608.5689301.csv @@ -0,0 +1,68 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.14,58.1,58.1,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,eigen_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,load_centrality 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.35,0.09,85.96,85.96,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.8,2.44,61.96,61.96,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.8,2.44,61.96,61.96,FHRR,degree_centrality diff --git a/GraphHD_v2/basic/result1699484665.915395.csv b/GraphHD_v2/basic/result1699484665.915395.csv new file mode 100644 index 00000000..8964fd13 --- /dev/null +++ b/GraphHD_v2/basic/result1699484665.915395.csv @@ -0,0 +1,68 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,57.14,57.14,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,closeness_centrality 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,current_flow_closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.34,0.09,85.96,85.96,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.01,2.43,62.53,62.53,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.01,2.43,62.53,62.53,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.01,2.43,62.53,62.53,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.01,2.43,62.53,62.53,FHRR,closeness_centrality diff --git a/GraphHD_v2/basic/result1699484777.505989.csv b/GraphHD_v2/basic/result1699484777.505989.csv new file mode 100644 index 00000000..3fddcaa6 --- /dev/null +++ b/GraphHD_v2/basic/result1699484777.505989.csv @@ -0,0 +1,64 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,subgraph_centrality_exp 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.29,0.14,55.24,55.24,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,information_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,current_flow_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.09,87.72,87.72,FHRR,second_order_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.97,2.55,61.31,61.31,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.97,2.55,61.31,61.31,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.97,2.55,61.31,61.31,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.97,2.55,61.31,61.31,FHRR,closeness_centrality diff --git a/GraphHD_v2/basic/result1699484873.465694.csv b/GraphHD_v2/basic/result1699484873.465694.csv new file mode 100644 index 00000000..2202f5ea --- /dev/null +++ b/GraphHD_v2/basic/result1699484873.465694.csv @@ -0,0 +1,10 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.02,2.41,60.91,60.91,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.02,2.41,60.91,60.91,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.02,2.41,60.91,60.91,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.02,2.41,60.91,60.91,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.02,2.41,60.91,60.91,FHRR,betweenness_centrality diff --git a/GraphHD_v2/basic/result1699484922.6135309.csv 
b/GraphHD_v2/basic/result1699484922.6135309.csv new file mode 100644 index 00000000..b6f3e548 --- /dev/null +++ b/GraphHD_v2/basic/result1699484922.6135309.csv @@ -0,0 +1,20 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.96,2.56,63.58,63.58,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699485213.081866.csv b/GraphHD_v2/basic/result1699485213.081866.csv new file mode 100644 index 00000000..582521be --- /dev/null +++ b/GraphHD_v2/basic/result1699485213.081866.csv @@ -0,0 +1,30 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,communicability_betweeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.94,0.45,25.56,25.56,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.98,0.89,70.66,70.66,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.98,0.89,70.66,70.66,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.98,0.89,70.66,70.66,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.98,0.89,70.66,70.66,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.98,0.89,70.66,70.66,FHRR,betweenness_centrality diff --git 
a/GraphHD_v2/basic/result1699485481.363292.csv b/GraphHD_v2/basic/result1699485481.363292.csv new file mode 100644 index 00000000..a7c6a078 --- /dev/null +++ b/GraphHD_v2/basic/result1699485481.363292.csv @@ -0,0 +1,40 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.97,0.42,23.33,23.33,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,katz_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.97,0.94,70.06,70.06,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.42,7.57,73.73,73.73,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.42,7.57,73.73,73.73,FHRR,degree_centrality diff --git a/GraphHD_v2/basic/result1699485717.082081.csv b/GraphHD_v2/basic/result1699485717.082081.csv new file mode 100644 index 00000000..150e6311 --- /dev/null +++ b/GraphHD_v2/basic/result1699485717.082081.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.83,6.09,74.01,74.01,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.83,6.09,74.01,74.01,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.83,6.09,74.01,74.01,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,15.83,6.09,74.01,74.01,FHRR,betweenness_centrality diff --git a/GraphHD_v2/basic/result1699486143.9071412.csv b/GraphHD_v2/basic/result1699486143.9071412.csv new file mode 100644 index 
00000000..33356e68 --- /dev/null +++ b/GraphHD_v2/basic/result1699486143.9071412.csv @@ -0,0 +1,18 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.75,0.4,42.22,42.22,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699486177.775477.csv b/GraphHD_v2/basic/result1699486177.775477.csv new file mode 100644 index 00000000..55ccd58b --- /dev/null +++ b/GraphHD_v2/basic/result1699486177.775477.csv @@ -0,0 +1,18 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,38.33,38.33,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699486310.766479.csv b/GraphHD_v2/basic/result1699486310.766479.csv new file mode 100644 index 00000000..84a3ee20 --- /dev/null +++ b/GraphHD_v2/basic/result1699486310.766479.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.87,0.38,34.44,34.44,FHRR,load_centrality diff --git 
a/GraphHD_v2/basic/result1699486591.8672829.csv b/GraphHD_v2/basic/result1699486591.8672829.csv new file mode 100644 index 00000000..76668170 --- /dev/null +++ b/GraphHD_v2/basic/result1699486591.8672829.csv @@ -0,0 +1,18 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.84,0.36,22.22,22.22,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699486660.236307.csv b/GraphHD_v2/basic/result1699486660.236307.csv new file mode 100644 index 00000000..4c2fd1c5 --- /dev/null +++ b/GraphHD_v2/basic/result1699486660.236307.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.96,0.45,25.56,25.56,FHRR diff --git a/GraphHD_v2/basic/result1699486663.246925.csv b/GraphHD_v2/basic/result1699486663.246925.csv new file mode 100644 index 00000000..b30e576c --- /dev/null +++ b/GraphHD_v2/basic/result1699486663.246925.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.01,0.43,20.0,20.0,FHRR diff --git a/GraphHD_v2/basic/result1699486788.859935.csv b/GraphHD_v2/basic/result1699486788.859935.csv new file mode 100644 index 00000000..ed785b3c --- /dev/null +++ b/GraphHD_v2/basic/result1699486788.859935.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.03,0.47,32.78,32.78,FHRR diff --git a/GraphHD_v2/basic/result1699486844.406713.csv b/GraphHD_v2/basic/result1699486844.406713.csv new file mode 100644 index 00000000..4c994339 --- /dev/null +++ b/GraphHD_v2/basic/result1699486844.406713.csv @@ -0,0 +1,18 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.9,0.37,23.33,23.33,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.9,0.45,26.67,26.67,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.8,0.46,22.78,22.78,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.76,0.54,27.78,27.78,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.36,0.67,16.11,16.11,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.33,0.68,21.11,21.11,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,4.33,1.87,23.33,23.33,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,5.92,3.68,22.22,22.22,FHRR,subgraph_centrality_exp 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.19,0.5,20.0,20.0,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699486890.615197.csv b/GraphHD_v2/basic/result1699486890.615197.csv new file mode 100644 index 00000000..03ba4acb --- /dev/null +++ b/GraphHD_v2/basic/result1699486890.615197.csv @@ -0,0 +1,18 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.85,0.41,23.22,23.22,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.99,0.49,21.52,21.52,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.02,0.48,21.7,21.7,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.12,0.52,23.93,23.93,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.49,0.76,21.67,21.67,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.36,0.63,22.11,22.11,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,3.88,1.89,22.19,22.19,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,4.91,2.13,21.67,21.67,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.11,0.55,22.41,22.41,FHRR,harmonic_centrality diff --git a/GraphHD_v2/basic/result1699762409.840183.csv b/GraphHD_v2/basic/result1699762409.840183.csv new file mode 100644 index 00000000..c9b1488f --- /dev/null +++ b/GraphHD_v2/basic/result1699762409.840183.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,1.9,0.84,99.83,99.83,FHRR diff --git a/GraphHD_v2/basic/result1699762424.5391848.csv b/GraphHD_v2/basic/result1699762424.5391848.csv new file mode 100644 index 00000000..40cb1ba5 --- /dev/null +++ b/GraphHD_v2/basic/result1699762424.5391848.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,1.69,0.89,99.5,99.5,FHRR diff --git a/GraphHD_v2/basic/result1699762580.685486.csv b/GraphHD_v2/basic/result1699762580.685486.csv new file mode 100644 index 00000000..e79e2002 --- /dev/null +++ b/GraphHD_v2/basic/result1699762580.685486.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.89,0.34,71.04,71.04,FHRR diff --git a/GraphHD_v2/basic/result1699762615.255841.csv b/GraphHD_v2/basic/result1699762615.255841.csv new file mode 100644 index 00000000..672476e3 --- /dev/null +++ b/GraphHD_v2/basic/result1699762615.255841.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.58,0.27,74.51,74.51,FHRR diff --git a/GraphHD_v2/basic/result1699764795.516787.csv b/GraphHD_v2/basic/result1699764795.516787.csv new file mode 100644 index 00000000..8bdecd03 --- /dev/null +++ b/GraphHD_v2/basic/result1699764795.516787.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.7,0.27,72.62,72.62,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,1.01,0.43,60.0,60.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.61,0.3,61.7,61.7,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,1.37,0.62,53.41,53.41,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.02,0.49,67.4,67.4,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA 
+DHFR_MD,10000,1.52,0.68,61.02,61.02,FHRR diff --git a/GraphHD_v2/basic/result1699764944.035145.csv b/GraphHD_v2/basic/result1699764944.035145.csv new file mode 100644 index 00000000..71ad7bd3 --- /dev/null +++ b/GraphHD_v2/basic/result1699764944.035145.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.52,0.28,73.2,73.2,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,0.98,0.45,61.96,61.96,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.64,0.31,62.73,62.73,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,1.41,0.62,49.51,49.51,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.08,0.52,67.25,67.25,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,1.55,0.71,59.24,59.24,FHRR diff --git a/GraphHD_v2/basic/result1699911664.8755379.csv b/GraphHD_v2/basic/result1699911664.8755379.csv new file mode 100644 index 00000000..5d82dfc2 --- /dev/null +++ b/GraphHD_v2/basic/result1699911664.8755379.csv @@ -0,0 +1,22 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,3.93,1.42,98.67,98.67,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.88,0.48,73.77,73.77,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,1.43,0.54,66.3,66.3,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.0,0.48,57.45,57.45,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,1.9,0.77,60.44,60.44,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.66,0.66,72.69,72.69,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,2.01,0.8,66.95,66.95,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ER_MD,10000,1.9,0.92,72.39,72.39,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,4.44,2.18,60.22,60.22,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7,10000,43.82,20.02,67.75,67.75,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7H,10000,59.18,27.56,66.79,66.79,FHRR diff --git a/GraphHD_v2/basic_centrality/result1699477760.426856.csv b/GraphHD_v2/basic_centrality/result1699477760.426856.csv new file mode 100644 index 00000000..3b8baed9 --- /dev/null +++ b/GraphHD_v2/basic_centrality/result1699477760.426856.csv @@ -0,0 +1,48 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.51,0.19,62.86,62.86,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.22,0.12,31.58,31.58,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,6.49,3.03,48.18,48.18,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.24,0.72,18.33,18.33,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.5,1.29,58.68,58.68,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,18.0,7.28,57.34,57.34,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.31,0.14,66.67,66.67,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.16,0.08,78.95,78.95,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,4.96,2.41,64.72,64.72,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.85,0.35,21.67,21.67,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,1.43,0.7,70.96,70.96,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA 
+DD,10000,10.59,4.54,75.42,75.42,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.86,0.41,59.05,59.05,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.59,0.25,77.19,77.19,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,20.48,8.64,65.29,65.29,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,4.52,2.01,21.11,21.11,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,8.38,3.77,67.66,67.66,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,72.19,28.99,66.67,66.67,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.39,0.19,60.0,60.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.23,0.12,87.72,87.72,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,6.95,3.3,63.18,63.18,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.16,0.64,20.56,20.56,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,3.26,1.11,67.07,67.07,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,20.3,8.46,73.73,73.73,FHRR diff --git a/GraphHD_v2/basic_node_attr/result1699911901.148511.csv b/GraphHD_v2/basic_node_attr/result1699911901.148511.csv new file mode 100644 index 00000000..84c1f421 --- /dev/null +++ b/GraphHD_v2/basic_node_attr/result1699911901.148511.csv @@ -0,0 +1,78 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.03,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,0.02,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ER_MD,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MOLT-4,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MOLT-4H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +Mutagenicity,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI109,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI-H23,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI-H23H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +OVCAR-8,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +OVCAR-8H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +P388,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA 
+P388H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PC-3,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PC-3H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_MM,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_MR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SF-295,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SF-295H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SN12C,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SN12CH,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SW-620,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SW-620H,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +UACC257,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +UACC257H,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +Yeast,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +YeastH,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/basic_node_attr/result1699912072.739661.csv b/GraphHD_v2/basic_node_attr/result1699912072.739661.csv new file mode 100644 index 00000000..b6c96a9c --- /dev/null +++ b/GraphHD_v2/basic_node_attr/result1699912072.739661.csv @@ -0,0 +1,40 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.02,1.2,78.17,78.17,FHRR +BZR,10000,0.0,0.37,33.61,33.61,FHRR +BZR_MD,10000,0.02,0.77,44.57,44.57,FHRR +COX2,10000,0.01,1.19,67.38,67.38,FHRR +COX2_MD,10000,0.02,1.82,48.35,48.35,FHRR +DHFR,10000,0.0,0.83,54.19,54.19,FHRR +DHFR_MD,10000,0.02,0.98,71.19,71.19,FHRR +ER_MD,10000,0.01,0.85,55.22,55.22,FHRR +FRANKENSTEIN,10000,0.0,2.09,55.76,55.76,FHRR +MCF-7,10000,0.0,20.64,65.06,65.06,FHRR +MCF-7H,10000,0.01,33.06,66.75,66.75,FHRR +MOLT-4,10000,0.0,32.0,66.42,66.42,FHRR +MOLT-4H,10000,0.01,45.56,73.78,73.78,FHRR +Mutagenicity,10000,0.0,3.34,54.69,54.69,FHRR +MUTAG,10000,0.0,0.12,42.11,42.11,FHRR +NCI1,10000,0.0,3.18,50.04,50.04,FHRR +NCI109,10000,0.0,3.19,53.19,53.19,FHRR +NCI-H23,10000,0.01,28.07,74.57,74.57,FHRR +NCI-H23H,10000,0.0,48.16,64.5,64.5,FHRR +OVCAR-8,10000,0.0,34.09,82.81,82.81,FHRR +OVCAR-8H,10000,0.0,48.88,73.38,73.38,FHRR +P388,10000,0.0,32.07,58.37,58.37,FHRR +P388H,10000,0.01,50.94,70.56,70.56,FHRR +PC-3,10000,0.0,23.33,70.59,70.59,FHRR +PC-3H,10000,0.0,32.17,78.23,78.23,FHRR +PTC_FM,10000,0.0,0.19,48.57,48.57,FHRR +PTC_FR,10000,0.0,0.19,39.62,39.62,FHRR +PTC_MM,10000,0.0,0.18,56.44,56.44,FHRR +PTC_MR,10000,0.0,0.18,51.92,51.92,FHRR +SF-295,10000,0.01,29.5,81.92,81.92,FHRR +SF-295H,10000,0.01,46.45,71.82,71.82,FHRR +SN12C,10000,0.01,32.21,77.37,77.37,FHRR +SN12CH,10000,0.01,52.6,31.39,31.39,FHRR +SW-620,10000,0.0,30.24,74.19,74.19,FHRR +SW-620H,10000,0.0,43.84,54.67,54.67,FHRR +UACC257,10000,0.01,27.66,75.26,75.26,FHRR +UACC257H,10000,0.0,39.69,79.5,79.5,FHRR +Yeast,10000,0.01,52.67,67.16,67.16,FHRR +YeastH,10000,0.0,68.76,70.09,70.09,FHRR diff --git a/GraphHD_v2/experiment_1/result1699228665.124537.csv b/GraphHD_v2/experiment_1/result1699228665.124537.csv new file 
mode 100644 index 00000000..d7afb034 --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699228665.124537.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.24,0.11,58.1,58.1 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.14,0.07,77.19,77.19 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,3.31,1.64,60.75,60.75 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.52,0.27,19.44,19.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.27,0.55,67.96,67.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,9.24,3.39,74.58,74.58 diff --git a/GraphHD_v2/experiment_1/result1699228755.18482.csv b/GraphHD_v2/experiment_1/result1699228755.18482.csv new file mode 100644 index 00000000..5ccad83c --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699228755.18482.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.22,0.12,55.71,55.71 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.12,0.07,84.04,84.04 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,3.24,1.69,63.62,63.62 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.53,0.28,25.83,25.83 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.16,0.56,67.34,67.34 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,8.01,3.43,74.6,74.6 diff --git a/GraphHD_v2/experiment_1/result1699292440.771747.csv b/GraphHD_v2/experiment_1/result1699292440.771747.csv new file mode 100644 index 00000000..49642902 --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699292440.771747.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.22,0.12,58.49,58.49 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.13,0.07,77.19,77.19 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,6.78,2.3,63.26,63.26 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.6,0.32,26.67,26.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.87,1.22,66.77,66.77 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,9.01,4.23,71.47,71.47 diff --git a/GraphHD_v2/experiment_1/result1699292622.428019.csv b/GraphHD_v2/experiment_1/result1699292622.428019.csv new file mode 100644 index 00000000..9c857072 --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699292622.428019.csv @@ -0,0 +1,22 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.27,0.09,66.67,66.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.09,0.04,48.0,48.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.32,0.2,49.52,49.52 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MR,10000,0.32,0.21,58.65,58.65 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MM,10000,0.33,0.15,55.45,55.45 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.26,0.13,45.28,45.28 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.27,0.1,84.21,84.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,3.88,1.82,63.58,63.58 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.54,0.26,26.11,26.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.23,0.56,65.57,65.57 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,8.37,3.38,73.45,73.45 diff --git a/GraphHD_v2/experiment_1/result1699300835.392452.csv b/GraphHD_v2/experiment_1/result1699300835.392452.csv 
new file mode 100644 index 00000000..94729943 --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699300835.392452.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.03,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_1/result1699300851.129469.csv b/GraphHD_v2/experiment_1/result1699300851.129469.csv new file mode 100644 index 00000000..61ce2312 --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699300851.129469.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.04,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_1/result1699315999.176398.csv b/GraphHD_v2/experiment_1/result1699315999.176398.csv new file mode 100644 index 00000000..ec2053ad --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699315999.176398.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.32,0.17,53.33,53.33 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.16,0.09,85.96,85.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,6.66,2.65,61.07,61.07 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.88,0.49,24.44,24.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.14,1.01,67.66,67.66 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,15.75,7.47,74.29,74.29 diff --git a/GraphHD_v2/experiment_1/result1699316143.838742.csv b/GraphHD_v2/experiment_1/result1699316143.838742.csv new file mode 100644 index 00000000..15b166ba --- /dev/null +++ b/GraphHD_v2/experiment_1/result1699316143.838742.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.28,0.15,54.29,54.29 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.17,0.08,89.47,89.47 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,5.02,2.57,64.8,64.8 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.86,0.43,25.0,25.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.06,0.81,70.66,70.66 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,16.28,6.61,73.73,73.73 diff --git a/GraphHD_v2/experiment_2/result1698883260.2217548.csv b/GraphHD_v2/experiment_2/result1698883260.2217548.csv new file mode 100644 index 00000000..fefc3f24 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698883260.2217548.csv @@ -0,0 +1 @@ +0,1e-05,0.0001,0.001,0.01,0.05,0.1,0.15,0.2,0.4,0.6,0.8,1 diff --git a/GraphHD_v2/experiment_2/result1698883637.197716.csv b/GraphHD_v2/experiment_2/result1698883637.197716.csv new file mode 100644 index 00000000..5d19490e --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698883637.197716.csv @@ -0,0 +1,8 @@ +0 +82.46 +82.46 +82.23 +RANDOM +77.19 +77.19 +80.18 diff --git a/GraphHD_v2/experiment_2/result1698883664.525732.csv b/GraphHD_v2/experiment_2/result1698883664.525732.csv new file mode 100644 index 00000000..e4cd6194 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698883664.525732.csv @@ -0,0 +1,8 @@ +0,1e-05,0.0001,0.001,0.01,0.05,0.1,0.15,0.2,0.4,0.6,0.8,1 +82.63,81.4,80.18,81.23,81.23,83.33,80.35,79.82,81.4,83.51,83.68,81.23,81.4 +82.63,81.4,80.18,81.23,81.23,83.33,80.35,79.82,81.4,83.51,83.68,81.23,81.4 +84.38,82.78,84.15,84.0,86.43,86.12,84.57,84.78,85.75,85.28,84.49,81.69,84.16 +RANDOM +83.16 +83.16 +84.12 diff --git a/GraphHD_v2/experiment_2/result1698897181.309966.csv b/GraphHD_v2/experiment_2/result1698897181.309966.csv new file mode 100644 index 00000000..541ffad6 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698897181.309966.csv @@ -0,0 
+1,4 @@ +0,1e-05,0.0001,0.001,0.01,0.05,0.1,0.15,0.2,0.4,0.6,0.8,1 +78.95,77.72,80.35,81.05,77.02,83.86,82.46,82.63,80.18,81.93,81.23,82.81,79.12 +78.95,77.72,80.35,81.05,77.02,83.86,82.46,82.63,80.18,81.93,81.23,82.81,79.12 +83.93,82.37,83.95,83.1,82.75,85.75,87.57,84.41,84.25,84.15,83.17,84.37,81.92 diff --git a/GraphHD_v2/experiment_2/result1698898025.2724328.csv b/GraphHD_v2/experiment_2/result1698898025.2724328.csv new file mode 100644 index 00000000..dee87b59 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698898025.2724328.csv @@ -0,0 +1,9 @@ +MUTAG,0 +85.96 +85.96 +ENZYMES,0 +25.28 +25.28 +PROTEINS,0 +52.1 +52.1 diff --git a/GraphHD_v2/experiment_2/result1698968981.460049.csv b/GraphHD_v2/experiment_2/result1698968981.460049.csv new file mode 100644 index 00000000..bae7ab31 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1698968981.460049.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +81.4 +81.4 +ENZYMES,RANDOM +36.33 +36.33 +PROTEINS,RANDOM +67.57 +67.57 diff --git a/GraphHD_v2/experiment_2/result1699229619.253332.csv b/GraphHD_v2/experiment_2/result1699229619.253332.csv new file mode 100644 index 00000000..c73ef7e1 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1699229619.253332.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.54,0.27,54.95,54.95 diff --git a/GraphHD_v2/experiment_2/result1699229637.498841.csv b/GraphHD_v2/experiment_2/result1699229637.498841.csv new file mode 100644 index 00000000..a2d2c429 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1699229637.498841.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.49,0.24,57.14,57.14 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.36,0.18,87.72,87.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,12.49,5.6,62.61,62.61 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,4.91,2.13,40.56,40.56 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,10.66,4.5,64.97,64.97 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,143.36,63.46,72.88,72.88 diff --git a/GraphHD_v2/experiment_2/result1699292652.668042.csv b/GraphHD_v2/experiment_2/result1699292652.668042.csv new file mode 100644 index 00000000..bc3d3ef0 --- /dev/null +++ b/GraphHD_v2/experiment_2/result1699292652.668042.csv @@ -0,0 +1,22 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,4.32,1.29,50.0,50.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.62,0.35,24.0,24.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.52,0.23,60.0,60.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MR,10000,0.48,0.27,56.73,56.73 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MM,10000,0.49,0.22,54.46,54.46 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.52,0.25,55.66,55.66 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.36,0.18,78.95,78.95 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,14.59,6.46,65.04,65.04 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,5.74,2.41,30.0,30.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,13.01,5.11,68.26,68.26 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,152.5,82.12,70.34,70.34 diff --git a/GraphHD_v2/experiment_2/result1699316391.372292.csv b/GraphHD_v2/experiment_2/result1699316391.372292.csv new file mode 100644 index 00000000..8fef92b1 --- /dev/null +++ 
b/GraphHD_v2/experiment_2/result1699316391.372292.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.79,0.41,54.72,54.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.56,0.24,84.21,84.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,21.66,8.96,64.56,64.56 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,8.04,3.4,33.89,33.89 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,17.2,7.39,67.37,67.37 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,263.15,105.25,75.99,75.99 diff --git a/GraphHD_v2/experiment_3/result1698898308.445337.csv b/GraphHD_v2/experiment_3/result1698898308.445337.csv new file mode 100644 index 00000000..cbeebce5 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1698898308.445337.csv @@ -0,0 +1,18 @@ +MUTAG,0 +78.07 +78.07 +ENZYMES,0 +21.94 +21.94 +PROTEINS,0 +53.59 +53.59 +MUTAG,RANDOM +86.84 +86.84 +ENZYMES,RANDOM +36.94 +36.94 +PROTEINS,RANDOM +69.01 +69.01 diff --git a/GraphHD_v2/experiment_3/result1698969403.039312.csv b/GraphHD_v2/experiment_3/result1698969403.039312.csv new file mode 100644 index 00000000..7f706dc8 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1698969403.039312.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +32.78 +32.78 +PROTEINS,RANDOM +68.56 +68.56 diff --git a/GraphHD_v2/experiment_3/result1698970515.460466.csv b/GraphHD_v2/experiment_3/result1698970515.460466.csv new file mode 100644 index 00000000..56aa2587 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1698970515.460466.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +80.35 +80.35 diff --git a/GraphHD_v2/experiment_3/result1698970526.979655.csv b/GraphHD_v2/experiment_3/result1698970526.979655.csv new file mode 100644 index 00000000..90864382 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1698970526.979655.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +56.14 +56.14 diff --git a/GraphHD_v2/experiment_3/result1699232628.314276.csv b/GraphHD_v2/experiment_3/result1699232628.314276.csv new file mode 100644 index 00000000..65ebf7e9 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699232628.314276.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.29,0.14,52.38,52.38 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.18,0.09,75.44,75.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,6.06,2.92,64.72,64.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.66,0.73,40.56,40.56 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,3.71,1.65,64.07,64.07 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,46.21,18.92,75.71,75.71 diff --git a/GraphHD_v2/experiment_3/result1699233370.5322418.csv b/GraphHD_v2/experiment_3/result1699233370.5322418.csv new file mode 100644 index 00000000..51f732d2 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699233370.5322418.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.37,0.15,57.14,57.14 diff --git a/GraphHD_v2/experiment_3/result1699233476.6957371.csv b/GraphHD_v2/experiment_3/result1699233476.6957371.csv new file mode 100644 index 00000000..3f1799be --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699233476.6957371.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.33,0.15,48.57,48.57 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.2,0.1,78.95,78.95 diff --git a/GraphHD_v2/experiment_3/result1699233788.749739.csv 
b/GraphHD_v2/experiment_3/result1699233788.749739.csv new file mode 100644 index 00000000..45c53b2e --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699233788.749739.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.16,0.08,57.14,57.14 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.08,0.04,43.86,43.86 diff --git a/GraphHD_v2/experiment_3/result1699233806.609916.csv b/GraphHD_v2/experiment_3/result1699233806.609916.csv new file mode 100644 index 00000000..0dbf439b --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699233806.609916.csv @@ -0,0 +1,6 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.14,0.07,55.24,55.24 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.07,0.04,26.32,26.32 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.25,1.22,51.26,51.26 diff --git a/GraphHD_v2/experiment_3/result1699233952.9048162.csv b/GraphHD_v2/experiment_3/result1699233952.9048162.csv new file mode 100644 index 00000000..924a8721 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699233952.9048162.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.16,0.08,66.67,66.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.07,0.04,89.47,89.47 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.48,1.3,62.85,62.85 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.58,0.26,40.0,40.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.2,0.64,66.47,66.47 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,11.23,5.15,72.32,72.32 diff --git a/GraphHD_v2/experiment_3/result1699299334.416488.csv b/GraphHD_v2/experiment_3/result1699299334.416488.csv new file mode 100644 index 00000000..db422d6d --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299334.416488.csv @@ -0,0 +1,22 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.29,0.14,33.33,33.33 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.08,0.03,48.0,48.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FM,10000,0.14,0.08,53.33,53.33 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MR,10000,0.15,0.08,50.0,50.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_MM,10000,0.2,0.09,54.46,54.46 +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.13,0.08,49.06,49.06 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.08,0.06,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,3.01,1.41,63.99,63.99 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.69,0.31,40.56,40.56 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.27,0.58,69.46,69.46 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,9.51,4.42,70.9,70.9 diff --git a/GraphHD_v2/experiment_3/result1699299439.4174678.csv b/GraphHD_v2/experiment_3/result1699299439.4174678.csv new file mode 100644 index 00000000..cf65dc32 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299439.4174678.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.42,0.24,41.67,41.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.13,0.08,48.0,48.0 diff --git a/GraphHD_v2/experiment_3/result1699299449.5678332.csv b/GraphHD_v2/experiment_3/result1699299449.5678332.csv new file mode 100644 index 00000000..15c5827c --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299449.5678332.csv @@ 
-0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.28,0.13,54.17,54.17 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.07,0.03,44.0,44.0 diff --git a/GraphHD_v2/experiment_3/result1699299708.5326378.csv b/GraphHD_v2/experiment_3/result1699299708.5326378.csv new file mode 100644 index 00000000..ce04f0ca --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299708.5326378.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.29,0.1,50.0,50.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.06,0.03,48.0,48.0 diff --git a/GraphHD_v2/experiment_3/result1699299923.6250482.csv b/GraphHD_v2/experiment_3/result1699299923.6250482.csv new file mode 100644 index 00000000..e3f7009d --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299923.6250482.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +OHSU,10000,0.14,0.06,62.5,62.5 +dataset,dimensions,train_time,test_time,accuracy,f1 +KKI,10000,0.03,0.02,48.0,48.0 diff --git a/GraphHD_v2/experiment_3/result1699299960.450033.csv b/GraphHD_v2/experiment_3/result1699299960.450033.csv new file mode 100644 index 00000000..3421cccf --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699299960.450033.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.1,0.06,56.6,56.6 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.04,0.03,84.21,84.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,1.79,1.66,62.94,62.94 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.02,0.71,20.0,20.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.81,0.68,61.38,61.38 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,3.19,1.36,74.58,74.58 diff --git a/GraphHD_v2/experiment_3/result1699300082.983465.csv b/GraphHD_v2/experiment_3/result1699300082.983465.csv new file mode 100644 index 00000000..8b6f4631 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699300082.983465.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.69,0.3,57.55,57.55 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.45,0.21,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,16.49,7.04,63.83,63.83 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,6.72,2.84,27.78,27.78 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,13.9,6.75,61.98,61.98 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,241.91,103.68,68.08,68.08 diff --git a/GraphHD_v2/experiment_3/result1699316987.076629.csv b/GraphHD_v2/experiment_3/result1699316987.076629.csv new file mode 100644 index 00000000..65b01aa8 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699316987.076629.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.2,0.25,61.32,61.32 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.12,0.07,77.19,77.19 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,3.12,1.75,62.85,62.85 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.57,0.3,21.11,21.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.21,0.51,63.47,63.47 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,9.27,3.11,72.6,72.6 diff --git a/GraphHD_v2/experiment_3/result1699317053.650334.csv b/GraphHD_v2/experiment_3/result1699317053.650334.csv new file mode 100644 index 00000000..3e2a9193 --- 
/dev/null +++ b/GraphHD_v2/experiment_3/result1699317053.650334.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.13,0.08,54.72,54.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.08,0.05,85.96,85.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.89,1.46,60.58,60.58 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.46,0.23,25.0,25.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.03,0.65,67.66,67.66 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,7.62,2.48,74.01,74.01 diff --git a/GraphHD_v2/experiment_3/result1699317240.8697112.csv b/GraphHD_v2/experiment_3/result1699317240.8697112.csv new file mode 100644 index 00000000..7aaa2568 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699317240.8697112.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.15,0.09,56.6,56.6 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.08,0.05,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.68,1.5,64.64,64.64 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.65,0.25,23.33,23.33 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.11,0.48,60.48,60.48 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,7.28,2.98,71.47,71.47 diff --git a/GraphHD_v2/experiment_3/result1699317273.874395.csv b/GraphHD_v2/experiment_3/result1699317273.874395.csv new file mode 100644 index 00000000..05ec0dca --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699317273.874395.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.08,0.06,43.4,43.4 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.04,0.03,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,1.21,0.83,61.48,61.48 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.21,0.13,25.56,25.56 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.58,0.3,60.48,60.48 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,3.14,1.26,71.19,71.19 diff --git a/GraphHD_v2/experiment_3/result1699317297.835927.csv b/GraphHD_v2/experiment_3/result1699317297.835927.csv new file mode 100644 index 00000000..d71dd1c6 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699317297.835927.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.39,0.18,52.83,52.83 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.23,0.11,89.47,89.47 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,8.21,3.71,64.31,64.31 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.32,0.55,19.44,19.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.76,1.35,65.27,65.27 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,19.59,8.55,72.32,72.32 diff --git a/GraphHD_v2/experiment_3/result1699317483.245534.csv b/GraphHD_v2/experiment_3/result1699317483.245534.csv new file mode 100644 index 00000000..ba34eeb1 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699317483.245534.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.38,0.19,46.23,46.23 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.23,0.11,91.23,91.23 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,10.44,3.74,64.72,64.72 +dataset,dimensions,train_time,test_time,accuracy,f1 
+ENZYMES,10000,1.37,0.61,21.11,21.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.74,1.49,64.07,64.07 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,18.49,8.59,71.75,71.75 diff --git a/GraphHD_v2/experiment_3/result1699317769.851528.csv b/GraphHD_v2/experiment_3/result1699317769.851528.csv new file mode 100644 index 00000000..fb7d7889 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699317769.851528.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.83,0.46,54.72,54.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.54,0.28,82.46,82.46 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,16.78,7.44,66.34,66.34 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,3.5,1.61,26.11,26.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,7.89,4.49,69.76,69.76 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,75.97,25.21,72.88,72.88 diff --git a/GraphHD_v2/experiment_3/result1699318023.689996.csv b/GraphHD_v2/experiment_3/result1699318023.689996.csv new file mode 100644 index 00000000..57aadca9 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699318023.689996.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.74,0.35,50.94,50.94 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.48,0.22,78.95,78.95 diff --git a/GraphHD_v2/experiment_3/result1699333316.7917252.csv b/GraphHD_v2/experiment_3/result1699333316.7917252.csv new file mode 100644 index 00000000..2d71d8c6 --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699333316.7917252.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,3.45,0.56,63.21,63.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.61,0.25,85.96,85.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,18.33,9.46,63.1,63.1 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,4.72,2.46,35.0,35.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,7.73,3.51,70.36,70.36 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,69.63,30.47,75.71,75.71 diff --git a/GraphHD_v2/experiment_3/result1699333839.826659.csv b/GraphHD_v2/experiment_3/result1699333839.826659.csv new file mode 100644 index 00000000..39ebb46d --- /dev/null +++ b/GraphHD_v2/experiment_3/result1699333839.826659.csv @@ -0,0 +1,48 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.31,0.16,45.71,45.71,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.21,0.1,36.84,36.84,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,8.5,2.44,48.58,48.58,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.94,0.41,15.56,15.56,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,1.92,0.89,40.12,40.12,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,16.5,6.58,60.73,60.73,BSC +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.13,0.08,50.48,50.48,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.08,0.05,82.46,82.46,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,2.45,1.37,62.94,62.94,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.8,0.33,36.67,36.67,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,1.48,0.63,66.17,66.17,MAP 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,10.29,5.16,75.14,75.14,MAP +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.73,0.34,55.24,55.24,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.49,0.22,78.95,78.95,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,17.42,7.93,63.1,63.1,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,3.71,2.36,40.0,40.0,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,9.86,3.51,68.26,68.26,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,74.12,33.86,70.06,70.06,HRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.19,0.11,51.43,51.43,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.14,0.07,89.47,89.47,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,5.18,3.24,63.58,63.58,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.14,0.57,37.22,37.22,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.38,1.22,64.97,64.97,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,22.44,9.21,72.32,72.32,FHRR diff --git a/GraphHD_v2/experiment_4/result1698970550.9232142.csv b/GraphHD_v2/experiment_4/result1698970550.9232142.csv new file mode 100644 index 00000000..52e7cf89 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970550.9232142.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +63.16 +63.16 +ENZYMES,RANDOM +20.0 +20.0 +PROTEINS,RANDOM +56.89 +56.89 diff --git a/GraphHD_v2/experiment_4/result1698970664.429025.csv b/GraphHD_v2/experiment_4/result1698970664.429025.csv new file mode 100644 index 00000000..6afc8f55 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970664.429025.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +66.67 +66.67 +ENZYMES,RANDOM +19.44 +19.44 +PROTEINS,RANDOM +46.71 +46.71 diff --git a/GraphHD_v2/experiment_4/result1698970759.67253.csv b/GraphHD_v2/experiment_4/result1698970759.67253.csv new file mode 100644 index 00000000..1c0efb29 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970759.67253.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +71.93 +71.93 +ENZYMES,RANDOM +25.56 +25.56 +PROTEINS,RANDOM +51.5 +51.5 diff --git a/GraphHD_v2/experiment_4/result1698970870.995784.csv b/GraphHD_v2/experiment_4/result1698970870.995784.csv new file mode 100644 index 00000000..fc12a998 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970870.995784.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +77.19 +77.19 +ENZYMES,RANDOM +40.0 +40.0 +PROTEINS,RANDOM +67.07 +67.07 diff --git a/GraphHD_v2/experiment_4/result1698970912.2332742.csv b/GraphHD_v2/experiment_4/result1698970912.2332742.csv new file mode 100644 index 00000000..3b3831b6 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970912.2332742.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +85.96 +85.96 +ENZYMES,RANDOM +38.33 +38.33 +PROTEINS,RANDOM +64.67 +64.67 diff --git a/GraphHD_v2/experiment_4/result1698970996.438001.csv b/GraphHD_v2/experiment_4/result1698970996.438001.csv new file mode 100644 index 00000000..16d6b40e --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698970996.438001.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +85.96 +85.96 +ENZYMES,RANDOM +33.33 +33.33 +PROTEINS,RANDOM +64.37 +64.37 diff --git a/GraphHD_v2/experiment_4/result1698971015.69031.csv b/GraphHD_v2/experiment_4/result1698971015.69031.csv new file mode 100644 index 00000000..6a2d6496 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971015.69031.csv @@ -0,0 +1,9 @@ 
+MUTAG,RANDOM +84.21 +84.21 +ENZYMES,RANDOM +33.33 +33.33 +PROTEINS,RANDOM +68.26 +68.26 diff --git a/GraphHD_v2/experiment_4/result1698971061.738545.csv b/GraphHD_v2/experiment_4/result1698971061.738545.csv new file mode 100644 index 00000000..503540d9 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971061.738545.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +77.19 +77.19 +ENZYMES,RANDOM +33.33 +33.33 +PROTEINS,RANDOM +68.86 +68.86 diff --git a/GraphHD_v2/experiment_4/result1698971086.0078552.csv b/GraphHD_v2/experiment_4/result1698971086.0078552.csv new file mode 100644 index 00000000..8a6466be --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971086.0078552.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +84.21 +84.21 +ENZYMES,RANDOM +35.0 +35.0 +PROTEINS,RANDOM +68.26 +68.26 diff --git a/GraphHD_v2/experiment_4/result1698971201.457181.csv b/GraphHD_v2/experiment_4/result1698971201.457181.csv new file mode 100644 index 00000000..c50874d7 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971201.457181.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +35.56 +35.56 +PROTEINS,RANDOM +68.26 +68.26 diff --git a/GraphHD_v2/experiment_4/result1698971270.866296.csv b/GraphHD_v2/experiment_4/result1698971270.866296.csv new file mode 100644 index 00000000..d290ae7c --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971270.866296.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +32.78 +32.78 +PROTEINS,RANDOM +43.71 +43.71 diff --git a/GraphHD_v2/experiment_4/result1698971322.394444.csv b/GraphHD_v2/experiment_4/result1698971322.394444.csv new file mode 100644 index 00000000..af028e76 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971322.394444.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +73.68 +73.68 +ENZYMES,RANDOM +35.56 +35.56 +PROTEINS,RANDOM +70.36 +70.36 diff --git a/GraphHD_v2/experiment_4/result1698971430.1781728.csv b/GraphHD_v2/experiment_4/result1698971430.1781728.csv new file mode 100644 index 00000000..b5f2fd81 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971430.1781728.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +18.89 +18.89 +PROTEINS,RANDOM +44.31 +44.31 diff --git a/GraphHD_v2/experiment_4/result1698971484.1329238.csv b/GraphHD_v2/experiment_4/result1698971484.1329238.csv new file mode 100644 index 00000000..23da3778 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971484.1329238.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +75.44 +75.44 +ENZYMES,RANDOM +27.78 +27.78 +PROTEINS,RANDOM +55.39 +55.39 diff --git a/GraphHD_v2/experiment_4/result1698971562.196776.csv b/GraphHD_v2/experiment_4/result1698971562.196776.csv new file mode 100644 index 00000000..fe49016b --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971562.196776.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +36.11 +36.11 +PROTEINS,RANDOM +67.96 +67.96 diff --git a/GraphHD_v2/experiment_4/result1698971664.957193.csv b/GraphHD_v2/experiment_4/result1698971664.957193.csv new file mode 100644 index 00000000..e17de826 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971664.957193.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +75.44 +75.44 +ENZYMES,RANDOM +30.0 +30.0 +PROTEINS,RANDOM +58.08 +58.08 diff --git a/GraphHD_v2/experiment_4/result1698971708.372192.csv b/GraphHD_v2/experiment_4/result1698971708.372192.csv new file mode 100644 index 00000000..f8962e82 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698971708.372192.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +85.96 +85.96 +ENZYMES,RANDOM +29.44 +29.44 +PROTEINS,RANDOM +60.18 +60.18 diff --git 
a/GraphHD_v2/experiment_4/result1698972104.965437.csv b/GraphHD_v2/experiment_4/result1698972104.965437.csv new file mode 100644 index 00000000..7f818321 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698972104.965437.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +75.44 +75.44 +ENZYMES,RANDOM +31.67 +31.67 +PROTEINS,RANDOM +58.18 +58.18 diff --git a/GraphHD_v2/experiment_4/result1698972251.580739.csv b/GraphHD_v2/experiment_4/result1698972251.580739.csv new file mode 100644 index 00000000..4ba43661 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698972251.580739.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +78.36 +78.36 +ENZYMES,RANDOM +37.59 +37.59 +PROTEINS,RANDOM +66.67 +66.67 diff --git a/GraphHD_v2/experiment_4/result1698972837.796536.csv b/GraphHD_v2/experiment_4/result1698972837.796536.csv new file mode 100644 index 00000000..376cee6f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698972837.796536.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +74.27 +74.27 +ENZYMES,RANDOM +35.0 +35.0 +PROTEINS,RANDOM +59.98 +59.98 diff --git a/GraphHD_v2/experiment_4/result1698972877.558339.csv b/GraphHD_v2/experiment_4/result1698972877.558339.csv new file mode 100644 index 00000000..4a708453 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698972877.558339.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +69.01 +69.01 +ENZYMES,RANDOM +31.67 +31.67 +PROTEINS,RANDOM +58.38 +58.38 diff --git a/GraphHD_v2/experiment_4/result1698973003.850564.csv b/GraphHD_v2/experiment_4/result1698973003.850564.csv new file mode 100644 index 00000000..b23d1a47 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1698973003.850564.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +79.53 +79.53 +ENZYMES,RANDOM +37.22 +37.22 +PROTEINS,RANDOM +68.36 +68.36 diff --git a/GraphHD_v2/experiment_4/result1699049344.535028.csv b/GraphHD_v2/experiment_4/result1699049344.535028.csv new file mode 100644 index 00000000..d0690c36 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699049344.535028.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +38.89 +38.89 +PROTEINS,RANDOM +67.27 +67.27 diff --git a/GraphHD_v2/experiment_4/result1699049489.260779.csv b/GraphHD_v2/experiment_4/result1699049489.260779.csv new file mode 100644 index 00000000..1ac9c051 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699049489.260779.csv @@ -0,0 +1,15 @@ +MUTAG,RANDOM +77.19 +77.19 +ENZYMES,RANDOM +43.89 +43.89 +PROTEINS,RANDOM +65.57 +65.57 +IMDB-BINARY,RANDOM +53.33 +53.33 +REDDIT-BINARY,RANDOM +66.17 +66.17 diff --git a/GraphHD_v2/experiment_4/result1699050105.484075.csv b/GraphHD_v2/experiment_4/result1699050105.484075.csv new file mode 100644 index 00000000..1ef34471 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699050105.484075.csv @@ -0,0 +1,12 @@ +MUTAG,RANDOM +85.96 +85.96 +ENZYMES,RANDOM +38.33 +38.33 +PROTEINS,RANDOM +67.66 +67.66 +IMDB-BINARY,RANDOM +51.67 +51.67 diff --git a/GraphHD_v2/experiment_4/result1699050877.022481.csv b/GraphHD_v2/experiment_4/result1699050877.022481.csv new file mode 100644 index 00000000..26f8aa65 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699050877.022481.csv @@ -0,0 +1,15 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +22.22 +22.22 +PROTEINS,RANDOM +64.97 +64.97 +IMDB-BINARY,RANDOM +65.67 +65.67 +REDDIT-BINARY,RANDOM +70.5 +70.5 diff --git a/GraphHD_v2/experiment_4/result1699051104.860107.csv b/GraphHD_v2/experiment_4/result1699051104.860107.csv new file mode 100644 index 00000000..b84449c7 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051104.860107.csv @@ -0,0 +1,6 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +23.33 +23.33 diff 
--git a/GraphHD_v2/experiment_4/result1699051121.839085.csv b/GraphHD_v2/experiment_4/result1699051121.839085.csv new file mode 100644 index 00000000..ddd8f4fa --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051121.839085.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +84.21 +84.21 +ENZYMES,RANDOM +18.89 +18.89 +PROTEINS,RANDOM +64.67 +64.67 diff --git a/GraphHD_v2/experiment_4/result1699051628.804132.csv b/GraphHD_v2/experiment_4/result1699051628.804132.csv new file mode 100644 index 00000000..5288380b --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051628.804132.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +87.72 +87.72 +ENZYMES,RANDOM +41.11 +41.11 +PROTEINS,RANDOM +72.46 +72.46 diff --git a/GraphHD_v2/experiment_4/result1699051698.685077.csv b/GraphHD_v2/experiment_4/result1699051698.685077.csv new file mode 100644 index 00000000..fc5bdb31 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051698.685077.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +40.56 +40.56 +PROTEINS,RANDOM +70.66 +70.66 diff --git a/GraphHD_v2/experiment_4/result1699051765.801305.csv b/GraphHD_v2/experiment_4/result1699051765.801305.csv new file mode 100644 index 00000000..7a755be2 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051765.801305.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +89.47 +89.47 +ENZYMES,RANDOM +21.11 +21.11 +PROTEINS,RANDOM +61.68 +61.68 diff --git a/GraphHD_v2/experiment_4/result1699051811.354264.csv b/GraphHD_v2/experiment_4/result1699051811.354264.csv new file mode 100644 index 00000000..d8b87d07 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051811.354264.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +84.21 +84.21 +ENZYMES,RANDOM +45.56 +45.56 +PROTEINS,RANDOM +68.56 +68.56 diff --git a/GraphHD_v2/experiment_4/result1699051821.0758982.csv b/GraphHD_v2/experiment_4/result1699051821.0758982.csv new file mode 100644 index 00000000..bc2b0ef9 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699051821.0758982.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +77.19 +77.19 +ENZYMES,RANDOM +36.67 +36.67 +PROTEINS,RANDOM +67.66 +67.66 diff --git a/GraphHD_v2/experiment_4/result1699052996.3486888.csv b/GraphHD_v2/experiment_4/result1699052996.3486888.csv new file mode 100644 index 00000000..a45c4d38 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699052996.3486888.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +26.32 +26.32 diff --git a/GraphHD_v2/experiment_4/result1699053413.236605.csv b/GraphHD_v2/experiment_4/result1699053413.236605.csv new file mode 100644 index 00000000..59f744fd --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053413.236605.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +29.82 +29.82 diff --git a/GraphHD_v2/experiment_4/result1699053426.0987232.csv b/GraphHD_v2/experiment_4/result1699053426.0987232.csv new file mode 100644 index 00000000..59f744fd --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053426.0987232.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +29.82 +29.82 diff --git a/GraphHD_v2/experiment_4/result1699053457.22961.csv b/GraphHD_v2/experiment_4/result1699053457.22961.csv new file mode 100644 index 00000000..a9129bac --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053457.22961.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +40.35 +40.35 diff --git a/GraphHD_v2/experiment_4/result1699053482.144466.csv b/GraphHD_v2/experiment_4/result1699053482.144466.csv new file mode 100644 index 00000000..590cc40c --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053482.144466.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +87.72 +87.72 diff --git a/GraphHD_v2/experiment_4/result1699053497.3144488.csv 
b/GraphHD_v2/experiment_4/result1699053497.3144488.csv new file mode 100644 index 00000000..68781b9f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053497.3144488.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +82.46 +82.46 diff --git a/GraphHD_v2/experiment_4/result1699053509.430704.csv b/GraphHD_v2/experiment_4/result1699053509.430704.csv new file mode 100644 index 00000000..e6ea3a42 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053509.430704.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +77.19 +77.19 diff --git a/GraphHD_v2/experiment_4/result1699053520.1596742.csv b/GraphHD_v2/experiment_4/result1699053520.1596742.csv new file mode 100644 index 00000000..28bffc76 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053520.1596742.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +78.95 +78.95 diff --git a/GraphHD_v2/experiment_4/result1699053536.614421.csv b/GraphHD_v2/experiment_4/result1699053536.614421.csv new file mode 100644 index 00000000..28bffc76 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053536.614421.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +78.95 +78.95 diff --git a/GraphHD_v2/experiment_4/result1699053559.8590772.csv b/GraphHD_v2/experiment_4/result1699053559.8590772.csv new file mode 100644 index 00000000..68781b9f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053559.8590772.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +82.46 +82.46 diff --git a/GraphHD_v2/experiment_4/result1699053574.497643.csv b/GraphHD_v2/experiment_4/result1699053574.497643.csv new file mode 100644 index 00000000..68781b9f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053574.497643.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +82.46 +82.46 diff --git a/GraphHD_v2/experiment_4/result1699053586.1175401.csv b/GraphHD_v2/experiment_4/result1699053586.1175401.csv new file mode 100644 index 00000000..c367f379 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053586.1175401.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +89.47 +89.47 diff --git a/GraphHD_v2/experiment_4/result1699053594.4083378.csv b/GraphHD_v2/experiment_4/result1699053594.4083378.csv new file mode 100644 index 00000000..f7503955 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053594.4083378.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +84.21 +84.21 diff --git a/GraphHD_v2/experiment_4/result1699053614.0146172.csv b/GraphHD_v2/experiment_4/result1699053614.0146172.csv new file mode 100644 index 00000000..245e2ef1 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053614.0146172.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +86.84 +86.84 +ENZYMES,RANDOM +21.11 +21.11 +PROTEINS,RANDOM +63.17 +63.17 diff --git a/GraphHD_v2/experiment_4/result1699053645.677804.csv b/GraphHD_v2/experiment_4/result1699053645.677804.csv new file mode 100644 index 00000000..e81e2f9d --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053645.677804.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +25.0 +25.0 +PROTEINS,RANDOM +62.87 +62.87 diff --git a/GraphHD_v2/experiment_4/result1699053680.642495.csv b/GraphHD_v2/experiment_4/result1699053680.642495.csv new file mode 100644 index 00000000..2886dce0 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053680.642495.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +89.47 +89.47 +ENZYMES,RANDOM +20.83 +20.83 +PROTEINS,RANDOM +62.72 +62.72 diff --git a/GraphHD_v2/experiment_4/result1699053718.043066.csv b/GraphHD_v2/experiment_4/result1699053718.043066.csv new file mode 100644 index 00000000..8ac56d45 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053718.043066.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +87.72 +87.72 +ENZYMES,RANDOM +25.56 +25.56 +PROTEINS,RANDOM 
+62.87 +62.87 diff --git a/GraphHD_v2/experiment_4/result1699053747.525312.csv b/GraphHD_v2/experiment_4/result1699053747.525312.csv new file mode 100644 index 00000000..e162bad9 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053747.525312.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +84.21 +84.21 +ENZYMES,RANDOM +36.11 +36.11 +PROTEINS,RANDOM +68.56 +68.56 diff --git a/GraphHD_v2/experiment_4/result1699053876.614656.csv b/GraphHD_v2/experiment_4/result1699053876.614656.csv new file mode 100644 index 00000000..0b7aa638 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053876.614656.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +75.44 +75.44 diff --git a/GraphHD_v2/experiment_4/result1699053956.251168.csv b/GraphHD_v2/experiment_4/result1699053956.251168.csv new file mode 100644 index 00000000..74c5f071 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053956.251168.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +71.93 +71.93 diff --git a/GraphHD_v2/experiment_4/result1699053981.043681.csv b/GraphHD_v2/experiment_4/result1699053981.043681.csv new file mode 100644 index 00000000..68781b9f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699053981.043681.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +82.46 +82.46 diff --git a/GraphHD_v2/experiment_4/result1699054030.082251.csv b/GraphHD_v2/experiment_4/result1699054030.082251.csv new file mode 100644 index 00000000..e6ea3a42 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699054030.082251.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +77.19 +77.19 diff --git a/GraphHD_v2/experiment_4/result1699054052.652253.csv b/GraphHD_v2/experiment_4/result1699054052.652253.csv new file mode 100644 index 00000000..bea822f9 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699054052.652253.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +70.18 +70.18 +ENZYMES,RANDOM +38.33 +38.33 +PROTEINS,RANDOM +70.66 +70.66 diff --git a/GraphHD_v2/experiment_4/result1699054157.086186.csv b/GraphHD_v2/experiment_4/result1699054157.086186.csv new file mode 100644 index 00000000..d2d759bb --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699054157.086186.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +78.95 +78.95 +ENZYMES,RANDOM +38.33 +38.33 +PROTEINS,RANDOM +67.37 +67.37 diff --git a/GraphHD_v2/experiment_4/result1699054966.744521.csv b/GraphHD_v2/experiment_4/result1699054966.744521.csv new file mode 100644 index 00000000..999bbf31 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699054966.744521.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +80.7 +80.7 +ENZYMES,RANDOM +44.44 +44.44 +PROTEINS,RANDOM +66.17 +66.17 diff --git a/GraphHD_v2/experiment_4/result1699055001.8518882.csv b/GraphHD_v2/experiment_4/result1699055001.8518882.csv new file mode 100644 index 00000000..c7906ebb --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699055001.8518882.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +82.46 +82.46 +ENZYMES,RANDOM +40.56 +40.56 +PROTEINS,RANDOM +68.86 +68.86 diff --git a/GraphHD_v2/experiment_4/result1699055108.192469.csv b/GraphHD_v2/experiment_4/result1699055108.192469.csv new file mode 100644 index 00000000..1c183fd8 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699055108.192469.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +77.19 +77.19 +ENZYMES,RANDOM +41.11 +41.11 +PROTEINS,RANDOM +61.08 +61.08 diff --git a/GraphHD_v2/experiment_4/result1699055393.6530318.csv b/GraphHD_v2/experiment_4/result1699055393.6530318.csv new file mode 100644 index 00000000..4f42171f --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699055393.6530318.csv @@ -0,0 +1,9 @@ +MUTAG,RANDOM +73.68 +73.68 +ENZYMES,RANDOM +40.0 +40.0 +PROTEINS,RANDOM +63.77 +63.77 diff --git 
a/GraphHD_v2/experiment_4/result1699230458.020399.csv b/GraphHD_v2/experiment_4/result1699230458.020399.csv new file mode 100644 index 00000000..f68973e3 --- /dev/null +++ b/GraphHD_v2/experiment_4/result1699230458.020399.csv @@ -0,0 +1,3 @@ +MUTAG,RANDOM +80.7 +80.7 diff --git a/GraphHD_v2/experiment_aux/result1699301071.7556171.csv b/GraphHD_v2/experiment_aux/result1699301071.7556171.csv new file mode 100644 index 00000000..98fc655f --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699301071.7556171.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.11,0.06,62.26,62.26 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.04,0.03,36.84,36.84 diff --git a/GraphHD_v2/experiment_aux/result1699301101.0858922.csv b/GraphHD_v2/experiment_aux/result1699301101.0858922.csv new file mode 100644 index 00000000..3cc68d77 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699301101.0858922.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.03,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699301112.38654.csv b/GraphHD_v2/experiment_aux/result1699301112.38654.csv new file mode 100644 index 00000000..bf284aa3 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699301112.38654.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.45,0.29,17.22,17.22 diff --git a/GraphHD_v2/experiment_aux/result1699301328.213244.csv b/GraphHD_v2/experiment_aux/result1699301328.213244.csv new file mode 100644 index 00000000..e6b18f33 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699301328.213244.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.24,0.58,66.77,66.77 diff --git a/GraphHD_v2/experiment_aux/result1699302070.288069.csv b/GraphHD_v2/experiment_aux/result1699302070.288069.csv new file mode 100644 index 00000000..9c0c7d99 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699302070.288069.csv @@ -0,0 +1,6 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.17,0.08,55.66,55.66 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.09,0.05,84.21,84.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.46,1.37,62.45,62.45 diff --git a/GraphHD_v2/experiment_aux/result1699302095.2334561.csv b/GraphHD_v2/experiment_aux/result1699302095.2334561.csv new file mode 100644 index 00000000..d33defbc --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699302095.2334561.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.15,0.07,64.15,64.15 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.14,0.06,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,2.25,1.28,65.04,65.04 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.56,0.26,42.78,42.78 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.36,0.55,65.27,65.27 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,9.81,5.03,75.71,75.71 diff --git a/GraphHD_v2/experiment_aux/result1699302216.558374.csv b/GraphHD_v2/experiment_aux/result1699302216.558374.csv new file mode 100644 index 00000000..b54aff22 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699302216.558374.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.16,0.08,45.28,45.28 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.07,0.04,80.7,80.7 +dataset,dimensions,train_time,test_time,accuracy,f1 
+NCI1,10000,2.38,1.73,65.29,65.29 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.69,0.31,27.78,27.78 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,1.28,0.6,64.07,64.07 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,10.02,3.83,74.29,74.29 diff --git a/GraphHD_v2/experiment_aux/result1699302618.3281658.csv b/GraphHD_v2/experiment_aux/result1699302618.3281658.csv new file mode 100644 index 00000000..0ecb51b7 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699302618.3281658.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.41,0.19,54.72,54.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.19,0.07,91.23,91.23 diff --git a/GraphHD_v2/experiment_aux/result1699305067.8188841.csv b/GraphHD_v2/experiment_aux/result1699305067.8188841.csv new file mode 100644 index 00000000..82a634ff --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305067.8188841.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.03,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.01,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699305095.925934.csv b/GraphHD_v2/experiment_aux/result1699305095.925934.csv new file mode 100644 index 00000000..55a0f344 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305095.925934.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.05,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699305247.2475982.csv b/GraphHD_v2/experiment_aux/result1699305247.2475982.csv new file mode 100644 index 00000000..82a634ff --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305247.2475982.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.03,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.01,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699305373.85254.csv b/GraphHD_v2/experiment_aux/result1699305373.85254.csv new file mode 100644 index 00000000..71163747 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305373.85254.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.04,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.04,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699305499.273552.csv b/GraphHD_v2/experiment_aux/result1699305499.273552.csv new file mode 100644 index 00000000..e3ceddaf --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305499.273552.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.02,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.08,0.0,0.0,0.0 diff --git 
a/GraphHD_v2/experiment_aux/result1699305953.8112419.csv b/GraphHD_v2/experiment_aux/result1699305953.8112419.csv new file mode 100644 index 00000000..17c42c48 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305953.8112419.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.31,0.02,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.01,0.12,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699305989.096194.csv b/GraphHD_v2/experiment_aux/result1699305989.096194.csv new file mode 100644 index 00000000..ff49d8b3 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699305989.096194.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.47,0.22,58.49,58.49 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.29,0.18,80.7,80.7 diff --git a/GraphHD_v2/experiment_aux/result1699306007.1794822.csv b/GraphHD_v2/experiment_aux/result1699306007.1794822.csv new file mode 100644 index 00000000..57158fec --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306007.1794822.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.63,0.25,57.55,57.55 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.34,0.18,78.95,78.95 diff --git a/GraphHD_v2/experiment_aux/result1699306052.321137.csv b/GraphHD_v2/experiment_aux/result1699306052.321137.csv new file mode 100644 index 00000000..adfccb04 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306052.321137.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.51,0.25,59.43,59.43 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.31,0.16,75.44,75.44 diff --git a/GraphHD_v2/experiment_aux/result1699306074.9835489.csv b/GraphHD_v2/experiment_aux/result1699306074.9835489.csv new file mode 100644 index 00000000..f27496c3 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306074.9835489.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,1.16,0.24,50.94,50.94 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.45,0.23,75.44,75.44 diff --git a/GraphHD_v2/experiment_aux/result1699306102.092428.csv b/GraphHD_v2/experiment_aux/result1699306102.092428.csv new file mode 100644 index 00000000..b4195f2c --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306102.092428.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.69,0.27,44.34,44.34 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.29,0.15,84.21,84.21 diff --git a/GraphHD_v2/experiment_aux/result1699306170.993531.csv b/GraphHD_v2/experiment_aux/result1699306170.993531.csv new file mode 100644 index 00000000..c8b01661 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306170.993531.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.46,0.23,57.55,57.55 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.29,0.15,78.95,78.95 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,4.39,2.22,63.67,63.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.08,0.49,38.33,38.33 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.59,1.13,68.26,68.26 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,29.69,13.9,71.47,71.47 diff --git a/GraphHD_v2/experiment_aux/result1699306579.532772.csv b/GraphHD_v2/experiment_aux/result1699306579.532772.csv new file mode 100644 index 
00000000..46b19d66 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306579.532772.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.06,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.04,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699306767.289386.csv b/GraphHD_v2/experiment_aux/result1699306767.289386.csv new file mode 100644 index 00000000..1ff5dadd --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306767.289386.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.02,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.02,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.03,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699306870.9991539.csv b/GraphHD_v2/experiment_aux/result1699306870.9991539.csv new file mode 100644 index 00000000..bf2776a7 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306870.9991539.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.03,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.01,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.02,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699306954.019829.csv b/GraphHD_v2/experiment_aux/result1699306954.019829.csv new file mode 100644 index 00000000..845c71ac --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306954.019829.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.03,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,0.0,0.0,0.0,0.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,0.03,0.0,0.0,0.0 diff --git a/GraphHD_v2/experiment_aux/result1699306971.498934.csv b/GraphHD_v2/experiment_aux/result1699306971.498934.csv new file mode 100644 index 00000000..b78f863c --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699306971.498934.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.24,0.11,57.55,57.55 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.15,0.08,70.18,70.18 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,4.09,1.94,62.21,62.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.0,0.45,37.78,37.78 +dataset,dimensions,train_time,test_time,accuracy,f1 
+PROTEINS,10000,2.36,1.04,69.76,69.76 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,27.24,17.82,75.42,75.42 diff --git a/GraphHD_v2/experiment_aux/result1699307139.0938811.csv b/GraphHD_v2/experiment_aux/result1699307139.0938811.csv new file mode 100644 index 00000000..8db458d9 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699307139.0938811.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.23,0.13,38.68,38.68 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.15,0.11,84.21,84.21 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,4.75,2.23,63.67,63.67 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.11,0.54,41.11,41.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,7.84,1.53,67.96,67.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,38.55,12.72,71.75,71.75 diff --git a/GraphHD_v2/experiment_aux/result1699309075.3794801.csv b/GraphHD_v2/experiment_aux/result1699309075.3794801.csv new file mode 100644 index 00000000..46b680fd --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699309075.3794801.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.24,0.13,38.68,38.68 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.14,0.07,85.96,85.96 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,5.01,2.29,62.37,62.37 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.15,0.5,40.0,40.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,2.56,1.03,62.57,62.57 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,33.49,12.56,68.36,68.36 diff --git a/GraphHD_v2/experiment_aux/result1699309352.056192.csv b/GraphHD_v2/experiment_aux/result1699309352.056192.csv new file mode 100644 index 00000000..4cef58c6 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699309352.056192.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.78,0.36,54.72,54.72 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.44,0.21,70.18,70.18 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,9.92,3.88,63.83,63.83 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.78,0.75,35.0,35.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,4.39,1.96,69.76,69.76 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,40.12,16.63,50.85,50.85 diff --git a/GraphHD_v2/experiment_aux/result1699309550.196883.csv b/GraphHD_v2/experiment_aux/result1699309550.196883.csv new file mode 100644 index 00000000..219f806e --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699309550.196883.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,0.93,0.38,48.11,48.11 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.4,0.2,75.44,75.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,25.69,10.57,62.29,62.29 +dataset,dimensions,train_time,test_time,accuracy,f1 +ENZYMES,10000,1.61,0.63,35.0,35.0 +dataset,dimensions,train_time,test_time,accuracy,f1 +PROTEINS,10000,3.14,1.51,70.06,70.06 +dataset,dimensions,train_time,test_time,accuracy,f1 +DD,10000,310.85,77.11,74.58,74.58 diff --git a/GraphHD_v2/experiment_aux/result1699318066.963996.csv b/GraphHD_v2/experiment_aux/result1699318066.963996.csv new file mode 100644 index 00000000..b0e34c42 --- /dev/null +++ b/GraphHD_v2/experiment_aux/result1699318066.963996.csv @@ -0,0 +1,6 @@ 
+dataset,dimensions,train_time,test_time,accuracy,f1 +PTC_FR,10000,1.76,1.11,53.77,53.77 +dataset,dimensions,train_time,test_time,accuracy,f1 +MUTAG,10000,0.97,0.43,75.44,75.44 +dataset,dimensions,train_time,test_time,accuracy,f1 +NCI1,10000,51.76,22.65,64.31,64.31 diff --git a/GraphHD_v2/experiments.txt b/GraphHD_v2/experiments.txt new file mode 100644 index 00000000..f3e73a00 --- /dev/null +++ b/GraphHD_v2/experiments.txt @@ -0,0 +1 @@ +1. Experiment evaluating encoding \ No newline at end of file diff --git a/GraphHD_v2/graphhd.py b/GraphHD_v2/graphhd.py new file mode 100644 index 00000000..cfc10a16 --- /dev/null +++ b/GraphHD_v2/graphhd.py @@ -0,0 +1,162 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid + + +def experiment(randomness=0): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = 10000 # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Level(size, out_features, randomness=0.05) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes) + model = model.to(device) + + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + auc = torchmetrics.AUROC("multiclass", num_classes=graphs.num_classes) + + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + auc.update(outputs.cpu(), samples.y) + + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + au = auc.compute().item() * 100 + print(f"Testing accuracy of {acc:.3f}%") + print(f"Testing f1 of {f:.3f}%") + print(f"Testing AUC of {au:.3f}%") + return acc, f, au + + +REPETITIONS = 10 +RANDOMNESS = [0, 0.00001, 0.0001, 0.001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.4, 0.6, 0.8, 1] + + +acc_final = [] +f1_final = [] +auc_final = [] + +for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + auc_aux = [] + for j in range(REPETITIONS): + acc, f1, auc = experiment(i) + acc_aux.append(acc) + f1_aux.append(f1) + auc_aux.append(auc) + acc_final.append(round(sum(acc_aux) / 
REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + auc_final.append(round(sum(auc_aux) / REPETITIONS, 2)) + +print(acc_final) +print(f1_final) +print(auc_final) diff --git a/GraphHD_v2/graphhd_basic.py b/GraphHD_v2/graphhd_basic.py new file mode 100644 index 00000000..be1ad508 --- /dev/null +++ b/GraphHD_v2/graphhd_basic.py @@ -0,0 +1,258 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "basic/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + if embed == "thermometer": + self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA) + elif embed == "circular": + self.node_ids = 
embeddings.Circular(size, out_features, vsa=VSA) + elif embed == "projection": + self.node_ids = embeddings.Projection(size, out_features, vsa=VSA) + elif embed == "sinusoid": + self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA) + elif embed == "density": + self.node_ids = embeddings.Density(size, out_features, vsa=VSA) + else: + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 25 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git 
a/GraphHD_v2/graphhd_basic_centrality.py b/GraphHD_v2/graphhd_basic_centrality.py new file mode 100644 index 00000000..ed5a4cb8 --- /dev/null +++ b/GraphHD_v2/graphhd_basic_centrality.py @@ -0,0 +1,219 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset +import torch_geometric.utils + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "basic_centrality/result" + str(time.time()) + ".csv" +DIM = 10000 + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + if embed == "thermometer": + self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA) + elif embed == "circular": + self.node_ids = embeddings.Circular(size, out_features, vsa=VSA) + elif embed == "projection": + self.node_ids = embeddings.Projection(size, out_features, vsa=VSA) + elif embed == "sinusoid": + self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA) + elif embed == "density": + self.node_ids = embeddings.Density(size, out_features, vsa=VSA) + else: + self.node_ids = embeddings.Random(size, 
out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + nodes, _ = x.edge_index + indexs = list(map(int, torch_geometric.utils.degree(nodes))) + + try: + node_id_hvs = torchhd.bind(node_id_hvs, self.levels.weight[indexs]) + # node_id_hvs = torchhd.bind(node_id_hvs, self.node_attr2(x.x)) + except Exception as e: + print("err " + str(e)) + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 50 +RANDOMNESS = ["random"] +DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] +VSAS = ["BSC", "MAP", "HRR", "FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_basic_node_attr.py b/GraphHD_v2/graphhd_basic_node_attr.py new file mode 100644 index 00000000..8d84c740 --- /dev/null +++ b/GraphHD_v2/graphhd_basic_node_attr.py @@ -0,0 +1,265 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this 
example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "basic_node_attr/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + if len(x.x[0]) > 0: + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_id_hvs = torchhd.bind( + node_id_hvs, self.node_attr.weight[indices_tensor] + ) + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + 
+ min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + break + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] +VSAS = ["FHRR"] + +with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_experiment_1.py b/GraphHD_v2/graphhd_experiment_1.py new file mode 100644 index 00000000..cb41a1a6 --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_1.py @@ -0,0 +1,187 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + 
+import time + +csv_file = "experiment_1/result" + str(time.time()) + ".csv" +DIM = 10000 +VSA = "FHRR" + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + if embed == "thermometer": + self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA) + elif embed == "circular": + self.node_ids = embeddings.Circular(size, out_features, vsa=VSA) + elif embed == "projection": + self.node_ids = embeddings.Projection(size, out_features, vsa=VSA) + elif embed == "sinusoid": + self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA) + elif embed == "density": + self.node_ids = embeddings.Density(size, out_features, vsa=VSA) + else: + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + 
samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] + +for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + ["dataset", "dimensions", "train_time", "test_time", "accuracy", "f1"] + ) + writer.writerows( + [[d, DIM, train_final[0], test_final[0], acc_final[0], f1_final[0]]] + ) diff --git a/GraphHD_v2/graphhd_experiment_2.py b/GraphHD_v2/graphhd_experiment_2.py new file mode 100644 index 00000000..359ad72d --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_2.py @@ -0,0 +1,189 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_2/result" + str(time.time()) + ".csv" +DIM = 10000 +VSA = "FHRR" + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + if embed == "thermometer": + self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA) + elif embed == "circular": + self.node_ids = embeddings.Circular(size, out_features, vsa=VSA) + elif embed == "projection": + self.node_ids = embeddings.Projection(size, out_features, vsa=VSA) + elif embed == "sinusoid": + self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA) + elif embed == "density": + self.node_ids = embeddings.Density(size, out_features, vsa=VSA) + else: + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def forward(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + 
f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = ["PTC_FR", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] + +for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + ["dataset", "dimensions", "train_time", "test_time", "accuracy", "f1"] + ) + writer.writerows( + [[d, DIM, train_final[0], test_final[0], acc_final[0], f1_final[0]]] + ) diff --git a/GraphHD_v2/graphhd_experiment_3.py b/GraphHD_v2/graphhd_experiment_3.py new file mode 100644 index 00000000..e9b64bb0 --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_3.py @@ -0,0 +1,218 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch_geometric.utils +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_3/result" + str(time.time()) + ".csv" +DIM = 10000 +VSA = "HRR" + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def local_centrality2(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def local_centrality(self, x): + nodes, _ = x.edge_index + indexs = list(map(int, torch_geometric.utils.degree(nodes))) + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + try: + node_id_hvs = torchhd.bind( + self.node_ids.weight[list(range(x.num_nodes))], + self.levels.weight[indexs], + ) + except: + print("err") + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def semi_local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + for j in adjacent_nodes: + node_id_hvs[i] = torchhd.bundle( + self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], + node_id_hvs[i], + ) + node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i])) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bundle(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def forward(self, x): + return self.local_centrality(x) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = 
samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = ["PTC_FR", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] + +for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + ["dataset", "dimensions", "train_time", "test_time", "accuracy", "f1"] + ) + writer.writerows( + [[d, DIM, train_final[0], test_final[0], acc_final[0], f1_final[0]]] + ) diff --git a/GraphHD_v2/graphhd_experiment_33.py b/GraphHD_v2/graphhd_experiment_33.py new file mode 100644 index 00000000..1d0ca17a --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_33.py @@ -0,0 +1,170 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_3/result" + str(time.time()) + ".csv" + + +def experiment(randomness=0, dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = 10000 # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features) + + def forward(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + node_id_hvs_2 = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs_2[i] = node_id_hvs[i] + for j in adjacent_nodes: + node_id_hvs_2[i] += torchhd.permute(node_id_hvs[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes) + model = model.to(device) + + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f + + +REPETITIONS = 5 +DATASET = ["MUTAG", "ENZYMES", "PROTEINS"] + +for d in DATASET: + acc_final = [] + f1_final = [] + acc_aux = [] + f1_aux = [] + for j in 
range(REPETITIONS): + acc, f1 = experiment(100, d) + acc_aux.append(acc) + f1_aux.append(f1) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow([d] + ["RANDOM"]) + writer.writerows([acc_final]) + writer.writerows([f1_final]) diff --git a/GraphHD_v2/graphhd_experiment_4.py b/GraphHD_v2/graphhd_experiment_4.py new file mode 100644 index 00000000..569c5303 --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_4.py @@ -0,0 +1,219 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_4/result" + str(time.time()) + ".csv" + + +def experiment(randomness=0, dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = 10000 # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features) + self.levels = embeddings.Circular(size, out_features) + + def local_centrality2(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = torchhd.bind( + self.node_ids.weight[i], self.levels.weight[len(adjacent_nodes)] + ) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def semi_local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + for j in adjacent_nodes: + node_id_hvs[i] = torchhd.bundle( + self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], + node_id_hvs[i], + ) + node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i])) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bundle(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def forward(self, x): + return self.local_centrality(x) + """ + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + node_id_hvs_2 = 
torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs_2[i] = node_id_hvs[i] + for j in adjacent_nodes: + node_id_hvs_2[i] += torchhd.permute(node_id_hvs[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col]) + return torchhd.multiset(hvs) + """ + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes) + model = model.to(device) + + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f + + +REPETITIONS = 1 +DATASET = ["MUTAG"] + +for d in DATASET: + acc_final = [] + f1_final = [] + acc_aux = [] + f1_aux = [] + for j in range(REPETITIONS): + acc, f1 = experiment(100, d) + acc_aux.append(acc) + f1_aux.append(f1) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow([d] + ["RANDOM"]) + writer.writerows([acc_final]) + writer.writerows([f1_final]) diff --git a/GraphHD_v2/graphhd_experiment_5.py b/GraphHD_v2/graphhd_experiment_5.py new file mode 100644 index 00000000..cf8dcf1e --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_5.py @@ -0,0 +1,208 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_4/result" + str(time.time()) + ".csv" + + +def experiment(randomness=0, dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = 10000 # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. 
+ This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, node_features): + super(Encoder, self).__init__() + self.out_features = out_features + + self.node_ids = embeddings.Random(size, out_features) + self.node_attr = embeddings.Density(node_features, out_features) + + self.levels = embeddings.Circular(size, out_features) + + def local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + # for i in nodes: + # node_id_hvs[i] = torchhd.bind(self.node_ids.weight[i], self.node_attr(x.x[i])) + + node_id_hvs = torchhd.bind(self.node_ids.weight, self.node_attr(x.x)) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + + return torchhd.multiset(hvs) + + def semi_local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + for j in adjacent_nodes: + node_id_hvs[i] = torchhd.bundle( + self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], + node_id_hvs[i], + ) + node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i])) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bundle(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def forward(self, x): + return self.local_centrality(x) + """ + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + node_id_hvs_2 = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs_2[i] = node_id_hvs[i] + for j in adjacent_nodes: + node_id_hvs_2[i] += torchhd.permute(node_id_hvs[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs_2[row], node_id_hvs_2[col]) + return 
torchhd.multiset(hvs) + """ + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, len(graphs.x[0])) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes) + model = model.to(device) + + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f + + +REPETITIONS = 1 +DATASET = ["MUTAG", "ENZYMES", "PROTEINS"] + +for d in DATASET: + acc_final = [] + f1_final = [] + acc_aux = [] + f1_aux = [] + for j in range(REPETITIONS): + acc, f1 = experiment(100, d) + acc_aux.append(acc) + f1_aux.append(f1) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow([d] + ["RANDOM"]) + writer.writerows([acc_final]) + writer.writerows([f1_final]) diff --git a/GraphHD_v2/graphhd_experiment_aux.py b/GraphHD_v2/graphhd_experiment_aux.py new file mode 100644 index 00000000..1c5ec547 --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_aux.py @@ -0,0 +1,222 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch_geometric.utils +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "experiment_aux/result" + str(time.time()) + ".csv" +DIM = 10000 +VSA = "FHRR" + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def to_undirected_attr(edge_index, edge_attr): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + + unique_elements, inverse_indices = torch.unique( + edge_index, dim=1, return_inverse=True + ) + + unique_lists = [inverse_indices == i for i in range(len(unique_elements.t()))] + first_indices = [ + indices.nonzero(as_tuple=False)[0, 0].item() for indices in unique_lists + ] + + if edge_attr is not None: + attr_edge = edge_attr[first_indices] + else: + attr_edge = None + + return edge_index[0][first_indices], edge_index[1][first_indices], attr_edge + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, edge_features, node_features): + super(Encoder, self).__init__() + self.out_features = out_features + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.edge_attr = embeddings.Random(edge_features, out_features, vsa=VSA) + self.edge_attr2 = embeddings.Density(edge_features, out_features, vsa=VSA) + self.node_attr = embeddings.Random(node_features, out_features, vsa=VSA) + self.node_attr2 = embeddings.Density(node_features, out_features, vsa=VSA) + + def local_centrality(self, x): + nodes, _ = x.edge_index + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + indexs = list(map(int, torch_geometric.utils.degree(nodes))) + + row, col, edge_attr = to_undirected_attr(x.edge_index, x.edge_attr) + + try: + node_id_hvs = torchhd.bind( + self.node_ids.weight[list(range(x.num_nodes))], + self.levels.weight[indexs], + ) + # node_id_hvs = torchhd.bind(node_id_hvs, self.node_attr.weight[x.x.argmax().item()]) + node_id_hvs = torchhd.bind(node_id_hvs, self.node_attr2(x.x)) + except: + print("err") + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + if edge_attr is not None: + # hvs = torchhd.bind(hvs, self.edge_attr.weight[edge_attr.argmax().item()]) + hvs = torchhd.bind(hvs, self.edge_attr2(edge_attr)) + return torchhd.multiset(hvs) + + def forward(self, x): + return self.local_centrality(x) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + + encode = Encoder( + DIMENSIONS, max_graph_size, graphs.num_edge_labels, graphs.num_node_labels + ) + encode = 
encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = ["PTC_FR", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] + +for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + ["dataset", "dimensions", "train_time", "test_time", "accuracy", "f1"] + ) + writer.writerows( + [[d, DIM, train_final[0], test_final[0], acc_final[0], f1_final[0]]] + ) diff --git a/GraphHD_v2/graphhd_experiment_encoding_centrality.py b/GraphHD_v2/graphhd_experiment_encoding_centrality.py new file mode 100644 index 00000000..381e5ce8 --- /dev/null +++ b/GraphHD_v2/graphhd_experiment_encoding_centrality.py @@ -0,0 +1,239 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch_geometric.utils +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "encoding_centrality/result" + str(time.time()) + ".csv" +DIM = 10000 + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + 
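+    # Descriptive comments (added; summary of the encoding implemented below,
+    # not part of the original script): each node hypervector is the bind of a
+    # random node-ID hypervector with a Level hypervector indexed by the node's
+    # degree; each undirected edge is encoded by binding its two endpoint
+    # hypervectors; the graph hypervector is the multiset (bundle) of all edge
+    # hypervectors. Graph hypervectors are accumulated per class in a Centroid
+    # model and test graphs are classified by similarity to the class centroids.
+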
def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size): + super(Encoder, self).__init__() + self.out_features = out_features + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def local_centrality2(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + node_id_hvs[i] = self.node_ids.weight[i] + for j in adjacent_nodes: + node_id_hvs[i] += torchhd.permute(self.node_ids.weight[j]) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def local_centrality(self, x): + nodes, _ = x.edge_index + indexs = list(map(int, torch_geometric.utils.degree(nodes))) + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + try: + node_id_hvs = torchhd.bind( + self.node_ids.weight[list(range(x.num_nodes))], + self.levels.weight[indexs], + ) + except: + print("err") + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def semi_local_centrality(self, x): + nodes, _ = x.edge_index + nodes = list(set(nodes)) + node_id_hvs = torch.zeros((x.num_nodes, self.out_features), device=device) + + for i in nodes: + adjacent_nodes = x.edge_index[1][x.edge_index[0] == i] + for j in adjacent_nodes: + node_id_hvs[i] = torchhd.bundle( + self.levels.weight[len(x.edge_index[1][x.edge_index[0] == j])], + node_id_hvs[i], + ) + node_id_hvs[i] = torchhd.bind(node_id_hvs[i], (self.node_ids.weight[i])) + + row, col = to_undirected(x.edge_index) + hvs = torchhd.bundle(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + def forward(self, x): + return self.local_centrality(x) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size) + encode = encode.to(device) + + model = 
Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 100 +RANDOMNESS = ["random"] +DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] +VSAS = ["BSC", "MAP", "HRR", "FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_level.py b/GraphHD_v2/graphhd_level.py new file mode 100644 index 00000000..b987c4b6 --- /dev/null +++ b/GraphHD_v2/graphhd_level.py @@ -0,0 +1,296 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "level/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = 
torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + # node_id_hvs = self.node_ids.weight[: x.num_nodes] + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = to_undirected(x.edge_index) + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + return final_hv[0] + + def forward_hashmap_label_random(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = 
to_undirected(x.edge_index) + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + return final_hv[0] + + def forward1(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + return final_hv[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 20 +RANDOMNESS = ["random"] +DATASET = ["BZR", "BZR_MD", "COX2", "COX2_MD", "DHFR", "DHFR_MD"] +# ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], 
+ test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_basic.py b/GraphHD_v2/graphhd_list_basic.py new file mode 100644 index 00000000..265b64c4 --- /dev/null +++ b/GraphHD_v2/graphhd_list_basic.py @@ -0,0 +1,272 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_basic/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + 
node_id_hvs = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] + + +# ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + +with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + 
writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_basic_node_attr.py b/GraphHD_v2/graphhd_list_basic_node_attr.py new file mode 100644 index 00000000..d577170f --- /dev/null +++ b/GraphHD_v2/graphhd_list_basic_node_attr.py @@ -0,0 +1,285 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_basic_node/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = 
embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + if len(x.x[0]) > 0: + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = to_undirected(x.edge_index) + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] + +# ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + +with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + 
acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_basic_node_attr2.py b/GraphHD_v2/graphhd_list_basic_node_attr2.py new file mode 100644 index 00000000..f8298d08 --- /dev/null +++ b/GraphHD_v2/graphhd_list_basic_node_attr2.py @@ -0,0 +1,275 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_basic_node2/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + if len(x.x[0]) > 0: + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, torchhd.permute(node_attr)) + + row, col = to_undirected(x.edge_index) + + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, 
average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = [ + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] + +# ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + +with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_basic_node_attr_edge.py b/GraphHD_v2/graphhd_list_basic_node_attr_edge.py new file mode 100644 index 00000000..ee5d2435 --- /dev/null +++ b/GraphHD_v2/graphhd_list_basic_node_attr_edge.py @@ -0,0 +1,258 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "node_attr/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - 
train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + self.edge_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + if len(x.x[0]) > 0: + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + has_attr = x.edge_attr != None + if has_attr: + edges_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.edge_attr.unbind()] + ) + edge_attr = self.edge_attr.weight[edges_tensor] + + row, col = to_undirected(x.edge_index) + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + if has_attr: + aux_hv = torchhd.bind( + aux_hv, + torchhd.bind( + torchhd.bind(node_id_hvs[i], node_id_hvs[j]), + edge_attr[idx], + ), + ) + else: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + if has_attr: + aux_hv = torchhd.bind( + torchhd.bind(node_id_hvs[i], node_id_hvs[j]), edge_attr[idx] + ) + else: + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + + 
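+ # Descriptive note (comment only): final_hv bundles the per-source-node accumulators.
+ # Edges that share a source node i are bound together (and bound with their
+ # edge-attribute hypervector when edge_attr is present); whenever the source node
+ # changes, the accumulated aux_hv is bundled into final_hv and a new accumulator
+ # is started from the current edge.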
return final_hv[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +DATASET = ["AIDS", "BZR", "COX2", "DHFR", "FRANKENSTEIN"] + +# ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_rank.py b/GraphHD_v2/graphhd_list_rank.py new file mode 100644 index 00000000..dcf08d3a --- /dev/null +++ b/GraphHD_v2/graphhd_list_rank.py @@ -0,0 +1,276 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_rank/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # 
hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in 
enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] # ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + +with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_rank_node_attr.py b/GraphHD_v2/graphhd_list_rank_node_attr.py new file mode 100644 index 00000000..e07b6d20 --- /dev/null +++ b/GraphHD_v2/graphhd_list_rank_node_attr.py @@ -0,0 +1,318 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_rank_node/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import 
to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + # node_id_hvs = self.node_ids.weight[: x.num_nodes] + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = to_undirected(x.edge_index) + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + 
) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + return final_hv[0] + + def forward_hashmap_label_random(self, x): + node_id_hvs = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + if len(x.x[0]) > 0: + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = to_undirected(x.edge_index) + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] # ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + 
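+ # Descriptive note (comment only): the *_final lists hold one mean (over
+ # REPETITIONS runs) per RANDOMNESS setting; only the first entry of each list
+ # is written to the CSV below.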
acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_list_rank_node_attr2.py b/GraphHD_v2/graphhd_list_rank_node_attr2.py new file mode 100644 index 00000000..7a4ebc29 --- /dev/null +++ b/GraphHD_v2/graphhd_list_rank_node_attr2.py @@ -0,0 +1,285 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv + +import time + +csv_file = "list_rank_node2/result" + str(time.time()) + ".csv" +DIM = 10000 +import networkx as nx +from torch_geometric.utils import to_networkx + + +def experiment(randomness=0, embed="random", dataset="MUTAG"): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. 
+ """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + return v + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores, scores_nodes + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, num_node_attr): + super(Encoder, self).__init__() + self.out_features = out_features + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + self.levels = embeddings.Level(size, out_features, vsa=VSA) + self.node_attr = embeddings.Random(num_node_attr, out_features, vsa=VSA) + + def forward(self, x): + # node_id_hvs = self.node_ids.weight[: x.num_nodes] + pr = pagerank(x) + pr_sort, pr_argsort = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[pr_argsort] = self.node_ids.weight[: x.num_nodes] + + def index_value(inner_tensor): + return torch.argmax(inner_tensor) + + indices_tensor = torch.stack( + [index_value(inner_tensor) for inner_tensor in x.x.unbind()] + ) + node_attr = self.node_attr.weight[indices_tensor] + node_id_hvs = torchhd.bind(node_id_hvs, node_attr) + + row, col = to_undirected(x.edge_index) + if len(row) > 0: + prev = row[0] + + final_hv = torchhd.empty(1, self.out_features, VSA) + aux_hv = torchhd.identity(1, self.out_features, VSA) + + for idx in range(len(x.edge_index[0])): + i = x.edge_index[0][idx] + j = x.edge_index[1][idx] + if prev == i: + aux_hv = torchhd.bind( + aux_hv, torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + ) + else: + prev = i + final_hv = torchhd.bundle(final_hv, aux_hv) + aux_hv = torchhd.bind(node_id_hvs[i], node_id_hvs[j]) + return final_hv[0] + + else: + return torchhd.empty(1, self.out_features, VSA)[0] + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, graphs.num_node_features) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for i, samples in enumerate(tqdm(train_ld, desc="Training")): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + + model.add(samples_hv, samples.y) + # break + + train_t = time.time() - 
train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + f1 = torchmetrics.F1Score( + num_classes=graphs.num_classes, average="macro", multiclass=True + ) + # f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + # break + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=False) + + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 10 +RANDOMNESS = ["random"] +DATASET = [ + "AIDS", + "BZR", + "BZR_MD", + "COX2", + "COX2_MD", + "DHFR", + "DHFR_MD", + "ER_MD", + "FRANKENSTEIN", + "MCF-7", + "MCF-7H", + "MOLT-4", + "MOLT-4H", + "Mutagenicity", + "MUTAG", + "NCI1", + "NCI109", + "NCI-H23", + "NCI-H23H", + "OVCAR-8", + "OVCAR-8H", + "P388", + "P388H", + "PC-3", + "PC-3H", + "PTC_FM", + "PTC_FR", + "PTC_MM", + "PTC_MR", + "SF-295", + "SF-295H", + "SN12C", + "SN12CH", + "SW-620", + "SW-620H", + "UACC257", + "UACC257H", + "Yeast", + "YeastH", +] # ,'BZR_MD','COX2','COX2_MD','DHFR','DHFR_MD','ER_MD', 'FRANKENSTEIN', 'NCI109','KKI','OHSU','Peking_1','PROTEINS','AIDS'] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + ] + ] + ) diff --git a/GraphHD_v2/graphhd_std_centrality.py b/GraphHD_v2/graphhd_std_centrality.py new file mode 100644 index 00000000..d8987243 --- /dev/null +++ b/GraphHD_v2/graphhd_std_centrality.py @@ -0,0 +1,524 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm +from torch_geometric.datasets import TUDataset +from torch_geometric.utils import to_networkx +from torch_geometric.data import DataLoader +from torch_geometric.utils.degree import degree +import networkx as nx + +# Note: this example requires the torch_geometric library: https://pytorch-geometric.readthedocs.io +from torch_geometric.datasets import TUDataset + +# Note: this example requires the torchmetrics library: https://torchmetrics.readthedocs.io +import torchmetrics + +import torchhd +from torchhd import embeddings +from torchhd.models import Centroid +import csv +from torch_geometric.utils import to_networkx +import networkx as nx +import numpy as np +import time + +csv_file = "metrics/result" + str(time.time()) + ".csv" +DIM = 10000 + + +def experiment(randomness=0, embed="random", dataset="MUTAG", metric="page_rank"): + device = 
torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using {} device".format(device)) + + DIMENSIONS = DIM # hypervectors dimension + + # for other available datasets see: https://pytorch-geometric.readthedocs.io/en/latest/notes/data_cheatsheet.html?highlight=tudatasets + # dataset = "MUTAG" + + graphs = TUDataset("../data", dataset) + train_size = int(0.7 * len(graphs)) + test_size = len(graphs) - train_size + train_ld, test_ld = torch.utils.data.random_split(graphs, [train_size, test_size]) + + def sparse_stochastic_graph(G): + """ + Returns a sparse adjacency matrix of the graph G. + The values indicate the probability of leaving a vertex. + This means that each column sums up to one. + """ + rows, columns = G.edge_index + # Calculate the probability for each column + values_per_column = 1.0 / torch.bincount(columns, minlength=G.num_nodes) + values_per_node = values_per_column[columns] + size = (G.num_nodes, G.num_nodes) + return torch.sparse_coo_tensor(G.edge_index, values_per_node, size) + + def centrality(data): + degree_centrality = data.edge_index[0].bincount(minlength=data.num_nodes) + degree_ranked_nodes = sorted( + range(data.num_nodes), key=lambda node: degree_centrality[node] + ) + + def semi_local_centrality(data): + G = nx.Graph() + + for i in range(data.edge_index.size(1)): + edge = tuple(data.edge_index[:, i].tolist()) + G.add_edge(*edge) + + # Calculate semi-local centrality using a custom approach + semi_local_centrality = [] + + for node in G.nodes(): + ego_graph = nx.ego_graph( + G, node, radius=2 + ) # Adjust the radius (2 in this case) + semi_local_centrality.append(len(ego_graph)) + + # Store the semi-local centrality scores in the PyTorch Geometric Data object + data.semi_local_centrality = torch.tensor(semi_local_centrality) + + # Rank nodes based on semi-local centrality + semi_local_ranked_nodes = sorted( + G.nodes(), key=lambda node: semi_local_centrality[node] + ) + return semi_local_ranked_nodes + + def degree_centrality(data): + G = to_networkx(data) + + scores = nx.degree_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def eigen_centrality(data): + G = to_networkx(data) + + scores = nx.eigenvector_centrality(G, max_iter=1000) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def katz_centrality(data): + G = to_networkx(data) + + beta = 0.1 + scores = nx.katz_centrality(G, beta=beta) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def closeness_centrality(data): + G = to_networkx(data) + + scores = nx.closeness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def incremental_closeness_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + G.add_edges_from(data.edge_index.t().tolist()) + + scores = nx.incremental_closeness_centrality(G, G.edges()) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def current_flow_closeness_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + scores = nx.current_flow_closeness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def information_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + scores = nx.information_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def betweenness_centrality(data): + G = 
to_networkx(data) + + scores = nx.betweenness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def edge_betweenness_centrality(data): + G = to_networkx(data) + + scores = nx.edge_betweenness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def current_flow_betweeness_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.current_flow_closeness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def edge_current_flow_betweeness_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.edge_current_flow_betweenness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def communicability_betweeness_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.communicability_betweenness_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def load_centrality(data): + G = to_networkx(data) + + scores = nx.load_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def edge_load_centrality(data): + G = to_networkx(data) + + scores = nx.edge_load_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def subgraph_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.subgraph_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def subgraph_centrality_exp(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.subgraph_centrality_exp(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def estrada_index(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.estrada_index(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def harmonic_centrality(data): + G = to_networkx(data) + + scores = nx.harmonic_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def dispersion(data): + G = to_networkx(data) + + scores = nx.dispersion(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def global_reaching_centrality(data): + G = to_networkx(data) + + scores = nx.global_reaching_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def percolation_centrality(data): + G = to_networkx(data) + + scores = nx.percolation_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def second_order_centrality(data): + G = to_networkx(data) + G = G.to_undirected() + + scores = nx.second_order_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def trophic_levels(data): + G = to_networkx(data) + + scores = nx.trophic_levels(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def trophic_differences(data): + G = to_networkx(data) + + scores = nx.trophic_differences(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def trophic_incoherence_parameter(data): + G = to_networkx(data) + + scores = 
nx.trophic_incoherence_parameter(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def voterank(data): + G = to_networkx(data) + + scores = nx.voterank(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def laplacian_centrality(data): + G = to_networkx(data) + + scores = nx.laplacian_centrality(G) + scores_nodes = sorted(G.nodes(), key=lambda node: scores[node]) + + return scores_nodes + + def pagerank(G, alpha=0.85, max_iter=100, tol=1e-06): + N = G.num_nodes + M = sparse_stochastic_graph(G) * alpha + v = torch.zeros(N, device=G.edge_index.device) + 1 / N + p = torch.zeros(N, device=G.edge_index.device) + 1 / N + for _ in range(max_iter): + v_prev = v + v = M @ v + p * (1 - alpha) + + err = (v - v_prev).abs().sum() + if tol != None and err < N * tol: + return v + + return v + + def to_undirected(edge_index): + """ + Returns the undirected edge_index + [[0, 1], [1, 0]] will result in [[0], [1]] + """ + edge_index = edge_index.sort(dim=0)[0] + edge_index = torch.unique(edge_index, dim=1) + return edge_index + + def min_max_graph_size(graph_dataset): + if len(graph_dataset) == 0: + return None, None + + max_num_nodes = float("-inf") + min_num_nodes = float("inf") + + for G in graph_dataset: + num_nodes = G.num_nodes + max_num_nodes = max(max_num_nodes, num_nodes) + min_num_nodes = min(min_num_nodes, num_nodes) + + return min_num_nodes, max_num_nodes + + class Encoder(nn.Module): + def __init__(self, out_features, size, metric): + super(Encoder, self).__init__() + self.out_features = out_features + self.metric = metric + if embed == "thermometer": + self.node_ids = embeddings.Thermometer(size, out_features, vsa=VSA) + elif embed == "circular": + self.node_ids = embeddings.Circular(size, out_features, vsa=VSA) + elif embed == "projection": + self.node_ids = embeddings.Projection(size, out_features, vsa=VSA) + elif embed == "sinusoid": + self.node_ids = embeddings.Sinusoid(size, out_features, vsa=VSA) + elif embed == "density": + self.node_ids = embeddings.Density(size, out_features, vsa=VSA) + else: + self.node_ids = embeddings.Random(size, out_features, vsa=VSA) + + def forward(self, x): + if metric == "degree_centrality": + order = degree_centrality(x) + elif metric == "eigen_centrality": + order = eigen_centrality(x) + elif metric == "katz_centrality": + order = katz_centrality(x) + elif metric == "closeness_centrality": + order = closeness_centrality(x) + elif metric == "current_flow_closeness_centrality": + order = current_flow_closeness_centrality(x) + elif metric == "information_centrality": + order = information_centrality(x) + elif metric == "betweenness_centrality": + order = betweenness_centrality(x) + elif metric == "current_flow_betweeness_centrality": + order = current_flow_betweeness_centrality(x) + elif metric == "communicability_betweeness_centrality": + order = communicability_betweeness_centrality(x) + elif metric == "load_centrality": + order = load_centrality(x) + elif metric == "subgraph_centrality": + order = subgraph_centrality(x) + elif metric == "subgraph_centrality_exp": + order = subgraph_centrality_exp(x) + elif metric == "harmonic_centrality": + order = harmonic_centrality(x) + elif metric == "second_order_centrality": + order = second_order_centrality(x) + elif metric == "trophic_levels": + order = trophic_levels(x) + elif metric == "laplacian_centrality": + order = laplacian_centrality(x) + elif metric == "none": + order = list(range(x.num_nodes)) + else: + pr = 
pagerank(x) + pr_sort, order = pr.sort() + + node_id_hvs = torchhd.empty(x.num_nodes, self.out_features, VSA) + node_id_hvs[order] = self.node_ids.weight[: x.num_nodes] + + row, col = to_undirected(x.edge_index) + + hvs = torchhd.bind(node_id_hvs[row], node_id_hvs[col]) + return torchhd.multiset(hvs) + + min_graph_size, max_graph_size = min_max_graph_size(graphs) + encode = Encoder(DIMENSIONS, max_graph_size, metric) + encode = encode.to(device) + + model = Centroid(DIMENSIONS, graphs.num_classes, VSA) + model = model.to(device) + + train_t = time.time() + with torch.no_grad(): + for samples in tqdm(train_ld, desc="Training"): + samples.edge_index = samples.edge_index.to(device) + samples.y = samples.y.to(device) + + samples_hv = encode(samples).unsqueeze(0) + model.add(samples_hv, samples.y) + train_t = time.time() - train_t + accuracy = torchmetrics.Accuracy("multiclass", num_classes=graphs.num_classes) + # f1 = torchmetrics.F1Score(num_classes=graphs.num_classes, average='macro', multiclass=True) + f1 = torchmetrics.F1Score("multiclass", num_classes=graphs.num_classes) + + test_t = time.time() + with torch.no_grad(): + if VSA != "BSC": + model.normalize() + + for samples in tqdm(test_ld, desc="Testing"): + samples.edge_index = samples.edge_index.to(device) + + samples_hv = encode(samples).unsqueeze(0) + outputs = model(samples_hv, dot=True) + accuracy.update(outputs.cpu(), samples.y) + f1.update(outputs.cpu(), samples.y) + test_t = time.time() - test_t + acc = accuracy.compute().item() * 100 + f = f1.compute().item() * 100 + return acc, f, train_t, test_t + + +REPETITIONS = 1 +RANDOMNESS = ["random"] +# DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] +METRICS = [ + "none", + "page_rank", + "degree_centrality", + "closeness_centrality", + "betweenness_centrality", + "load_centrality", + "subgraph_centrality", + "subgraph_centrality_exp", + "harmonic_centrality", +] +DATASET = ["PTC_FM", "MUTAG", "NCI1", "ENZYMES", "PROTEINS", "DD"] +# VSAS = ["BSC", "MAP", "HRR", "FHRR"] +VSAS = ["FHRR"] + + +for VSA in VSAS: + for d in DATASET: + for METRIC in METRICS: + acc_final = [] + f1_final = [] + train_final = [] + test_final = [] + for i in RANDOMNESS: + acc_aux = [] + f1_aux = [] + train_aux = [] + test_aux = [] + for j in range(REPETITIONS): + acc, f1, train_t, test_t = experiment(1, i, d, METRIC) + acc_aux.append(acc) + f1_aux.append(f1) + train_aux.append(train_t) + test_aux.append(test_t) + acc_final.append(round(sum(acc_aux) / REPETITIONS, 2)) + f1_final.append(round(sum(f1_aux) / REPETITIONS, 2)) + train_final.append(round(sum(train_aux) / REPETITIONS, 2)) + test_final.append(round(sum(test_aux) / REPETITIONS, 2)) + + with open(csv_file, mode="a", newline="") as file: + writer = csv.writer(file) + writer.writerow( + [ + "dataset", + "dimensions", + "train_time", + "test_time", + "accuracy", + "f1", + "VSA", + "metric", + ] + ) + writer.writerows( + [ + [ + d, + DIM, + train_final[0], + test_final[0], + acc_final[0], + f1_final[0], + VSA, + METRIC, + ] + ] + ) diff --git a/GraphHD_v2/level/result1699578347.486874.csv b/GraphHD_v2/level/result1699578347.486874.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699578347.486874.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578431.389778.csv b/GraphHD_v2/level/result1699578431.389778.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ 
b/GraphHD_v2/level/result1699578431.389778.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578442.387155.csv b/GraphHD_v2/level/result1699578442.387155.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699578442.387155.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578688.829068.csv b/GraphHD_v2/level/result1699578688.829068.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699578688.829068.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578728.4626272.csv b/GraphHD_v2/level/result1699578728.4626272.csv new file mode 100644 index 00000000..a9023447 --- /dev/null +++ b/GraphHD_v2/level/result1699578728.4626272.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578747.986776.csv b/GraphHD_v2/level/result1699578747.986776.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699578747.986776.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578761.568224.csv b/GraphHD_v2/level/result1699578761.568224.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699578761.568224.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578833.9393332.csv b/GraphHD_v2/level/result1699578833.9393332.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699578833.9393332.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699578850.797598.csv b/GraphHD_v2/level/result1699578850.797598.csv new file mode 100644 index 00000000..81ce6711 --- /dev/null +++ b/GraphHD_v2/level/result1699578850.797598.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.51,15.0,15.0,FHRR diff --git a/GraphHD_v2/level/result1699578858.695429.csv b/GraphHD_v2/level/result1699578858.695429.csv new file mode 100644 index 00000000..07352c54 --- /dev/null +++ b/GraphHD_v2/level/result1699578858.695429.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.92,0.58,19.44,19.44,FHRR diff --git a/GraphHD_v2/level/result1699578903.8540978.csv b/GraphHD_v2/level/result1699578903.8540978.csv new file mode 100644 index 00000000..613f7280 --- /dev/null +++ b/GraphHD_v2/level/result1699578903.8540978.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.01,0.44,21.67,21.67,FHRR diff --git a/GraphHD_v2/level/result1699578917.0315242.csv b/GraphHD_v2/level/result1699578917.0315242.csv new file mode 100644 index 00000000..ac6f275f --- /dev/null +++ b/GraphHD_v2/level/result1699578917.0315242.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.01,0.55,21.67,21.67,FHRR diff --git a/GraphHD_v2/level/result1699578933.621065.csv b/GraphHD_v2/level/result1699578933.621065.csv 
new file mode 100644 index 00000000..aff9c7a9 --- /dev/null +++ b/GraphHD_v2/level/result1699578933.621065.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.05,0.58,19.44,19.44,FHRR diff --git a/GraphHD_v2/level/result1699578953.1394298.csv b/GraphHD_v2/level/result1699578953.1394298.csv new file mode 100644 index 00000000..01bdacef --- /dev/null +++ b/GraphHD_v2/level/result1699578953.1394298.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.92,0.47,22.22,22.22,FHRR diff --git a/GraphHD_v2/level/result1699579418.071488.csv b/GraphHD_v2/level/result1699579418.071488.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699579418.071488.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699579455.586331.csv b/GraphHD_v2/level/result1699579455.586331.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699579455.586331.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699579470.344507.csv b/GraphHD_v2/level/result1699579470.344507.csv new file mode 100644 index 00000000..f6367790 --- /dev/null +++ b/GraphHD_v2/level/result1699579470.344507.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.1,0.53,22.22,22.22,FHRR diff --git a/GraphHD_v2/level/result1699579491.3810759.csv b/GraphHD_v2/level/result1699579491.3810759.csv new file mode 100644 index 00000000..1ab2c1df --- /dev/null +++ b/GraphHD_v2/level/result1699579491.3810759.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.36,0.16,52.38,52.38,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.18,0.09,87.72,87.72,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,6.98,3.3,59.94,59.94,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.29,0.48,22.22,22.22,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.08,1.09,47.9,47.9,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,17.81,9.1,59.89,59.89,FHRR diff --git a/GraphHD_v2/level/result1699579896.063437.csv b/GraphHD_v2/level/result1699579896.063437.csv new file mode 100644 index 00000000..824195ac --- /dev/null +++ b/GraphHD_v2/level/result1699579896.063437.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.33,0.18,53.33,53.33,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.26,0.1,61.4,61.4,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,6.5,3.45,53.04,53.04,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.34,0.6,13.33,13.33,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.45,1.01,52.99,52.99,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,20.37,7.08,53.95,53.95,FHRR diff --git a/GraphHD_v2/level/result1699579972.305013.csv b/GraphHD_v2/level/result1699579972.305013.csv new file mode 100644 index 00000000..211bb9f2 --- /dev/null +++ b/GraphHD_v2/level/result1699579972.305013.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.31,0.15,62.86,62.86,FHRR 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.18,0.09,78.95,78.95,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,5.49,3.47,63.02,63.02,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.16,0.68,21.11,21.11,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,3.17,1.02,60.18,60.18,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,20.93,9.4,50.85,50.85,FHRR diff --git a/GraphHD_v2/level/result1699580103.8959098.csv b/GraphHD_v2/level/result1699580103.8959098.csv new file mode 100644 index 00000000..a9db3193 --- /dev/null +++ b/GraphHD_v2/level/result1699580103.8959098.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.27,0.14,50.48,50.48,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.16,0.08,75.44,75.44,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,5.33,2.38,56.2,56.2,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.85,0.44,17.22,17.22,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,2.0,0.88,56.29,56.29,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,18.22,9.78,50.0,50.0,FHRR diff --git a/GraphHD_v2/level/result1699580421.0602582.csv b/GraphHD_v2/level/result1699580421.0602582.csv new file mode 100644 index 00000000..cfdd29e7 --- /dev/null +++ b/GraphHD_v2/level/result1699580421.0602582.csv @@ -0,0 +1,4 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.44,0.18,56.19,56.19,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.19,0.1,82.46,82.46,FHRR diff --git a/GraphHD_v2/level/result1699580495.8834088.csv b/GraphHD_v2/level/result1699580495.8834088.csv new file mode 100644 index 00000000..8e02c286 --- /dev/null +++ b/GraphHD_v2/level/result1699580495.8834088.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.02,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,0.27,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580505.991956.csv b/GraphHD_v2/level/result1699580505.991956.csv new file mode 100644 index 00000000..068a26ae --- /dev/null +++ b/GraphHD_v2/level/result1699580505.991956.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DD,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580536.307938.csv b/GraphHD_v2/level/result1699580536.307938.csv new file mode 100644 index 00000000..a9023447 --- /dev/null +++ b/GraphHD_v2/level/result1699580536.307938.csv @@ -0,0 +1,2 @@ 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580553.68577.csv b/GraphHD_v2/level/result1699580553.68577.csv new file mode 100644 index 00000000..3500f45f --- /dev/null +++ b/GraphHD_v2/level/result1699580553.68577.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.16,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580681.258789.csv b/GraphHD_v2/level/result1699580681.258789.csv new file mode 100644 index 00000000..8f9ae2e2 --- /dev/null +++ b/GraphHD_v2/level/result1699580681.258789.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.11,0.44,15.0,15.0,FHRR diff --git a/GraphHD_v2/level/result1699580714.969506.csv b/GraphHD_v2/level/result1699580714.969506.csv new file mode 100644 index 00000000..bdbf990d --- /dev/null +++ b/GraphHD_v2/level/result1699580714.969506.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.07,0.52,22.22,22.22,FHRR diff --git a/GraphHD_v2/level/result1699580732.277983.csv b/GraphHD_v2/level/result1699580732.277983.csv new file mode 100644 index 00000000..08de8ad9 --- /dev/null +++ b/GraphHD_v2/level/result1699580732.277983.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.08,0.63,16.67,16.67,FHRR diff --git a/GraphHD_v2/level/result1699580823.903604.csv b/GraphHD_v2/level/result1699580823.903604.csv new file mode 100644 index 00000000..3b72e7aa --- /dev/null +++ b/GraphHD_v2/level/result1699580823.903604.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.09,0.57,23.33,23.33,FHRR diff --git a/GraphHD_v2/level/result1699580855.909131.csv b/GraphHD_v2/level/result1699580855.909131.csv new file mode 100644 index 00000000..74498eaf --- /dev/null +++ b/GraphHD_v2/level/result1699580855.909131.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.08,0.53,14.44,14.44,FHRR diff --git a/GraphHD_v2/level/result1699580868.360738.csv b/GraphHD_v2/level/result1699580868.360738.csv new file mode 100644 index 00000000..346de80c --- /dev/null +++ b/GraphHD_v2/level/result1699580868.360738.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.1,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580886.5125299.csv b/GraphHD_v2/level/result1699580886.5125299.csv new file mode 100644 index 00000000..346de80c --- /dev/null +++ b/GraphHD_v2/level/result1699580886.5125299.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.1,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580900.067785.csv b/GraphHD_v2/level/result1699580900.067785.csv new file mode 100644 index 00000000..287ec971 --- /dev/null +++ b/GraphHD_v2/level/result1699580900.067785.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.34,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580907.976021.csv b/GraphHD_v2/level/result1699580907.976021.csv new file mode 100644 index 00000000..e6f0836b --- /dev/null +++ b/GraphHD_v2/level/result1699580907.976021.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.61,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580917.753597.csv b/GraphHD_v2/level/result1699580917.753597.csv new file mode 100644 index 00000000..5e6f0ba7 --- /dev/null +++ 
b/GraphHD_v2/level/result1699580917.753597.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.12,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580928.033039.csv b/GraphHD_v2/level/result1699580928.033039.csv new file mode 100644 index 00000000..c8cf342f --- /dev/null +++ b/GraphHD_v2/level/result1699580928.033039.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.26,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580963.631805.csv b/GraphHD_v2/level/result1699580963.631805.csv new file mode 100644 index 00000000..7d58a9bf --- /dev/null +++ b/GraphHD_v2/level/result1699580963.631805.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.34,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699580978.174963.csv b/GraphHD_v2/level/result1699580978.174963.csv new file mode 100644 index 00000000..ecec8fa4 --- /dev/null +++ b/GraphHD_v2/level/result1699580978.174963.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.22,0.44,16.67,16.67,FHRR diff --git a/GraphHD_v2/level/result1699581310.901495.csv b/GraphHD_v2/level/result1699581310.901495.csv new file mode 100644 index 00000000..8dfd7d14 --- /dev/null +++ b/GraphHD_v2/level/result1699581310.901495.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.44,0.5,17.22,17.22,FHRR diff --git a/GraphHD_v2/level/result1699592495.3850791.csv b/GraphHD_v2/level/result1699592495.3850791.csv new file mode 100644 index 00000000..32082031 --- /dev/null +++ b/GraphHD_v2/level/result1699592495.3850791.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.42,0.45,16.11,16.11,FHRR diff --git a/GraphHD_v2/level/result1699592621.924447.csv b/GraphHD_v2/level/result1699592621.924447.csv new file mode 100644 index 00000000..8f38d3c0 --- /dev/null +++ b/GraphHD_v2/level/result1699592621.924447.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,1.42,0.52,21.11,21.11,FHRR diff --git a/GraphHD_v2/level/result1699592645.672791.csv b/GraphHD_v2/level/result1699592645.672791.csv new file mode 100644 index 00000000..fa02ef37 --- /dev/null +++ b/GraphHD_v2/level/result1699592645.672791.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.12,0.54,20.0,20.0,FHRR diff --git a/GraphHD_v2/level/result1699592736.11471.csv b/GraphHD_v2/level/result1699592736.11471.csv new file mode 100644 index 00000000..b3baa607 --- /dev/null +++ b/GraphHD_v2/level/result1699592736.11471.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.12,0.45,14.44,14.44,FHRR diff --git a/GraphHD_v2/level/result1699592777.374393.csv b/GraphHD_v2/level/result1699592777.374393.csv new file mode 100644 index 00000000..e89333a4 --- /dev/null +++ b/GraphHD_v2/level/result1699592777.374393.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.12,0.49,15.0,15.0,FHRR diff --git a/GraphHD_v2/level/result1699592951.021018.csv b/GraphHD_v2/level/result1699592951.021018.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699592951.021018.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593047.9138718.csv b/GraphHD_v2/level/result1699593047.9138718.csv 
new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699593047.9138718.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593308.02498.csv b/GraphHD_v2/level/result1699593308.02498.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699593308.02498.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593330.139117.csv b/GraphHD_v2/level/result1699593330.139117.csv new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699593330.139117.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593344.0682318.csv b/GraphHD_v2/level/result1699593344.0682318.csv new file mode 100644 index 00000000..194d5740 --- /dev/null +++ b/GraphHD_v2/level/result1699593344.0682318.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.06,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593405.913769.csv b/GraphHD_v2/level/result1699593405.913769.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699593405.913769.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593425.931901.csv b/GraphHD_v2/level/result1699593425.931901.csv new file mode 100644 index 00000000..8fd144c6 --- /dev/null +++ b/GraphHD_v2/level/result1699593425.931901.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.12,0.53,21.11,21.11,FHRR diff --git a/GraphHD_v2/level/result1699593509.023078.csv b/GraphHD_v2/level/result1699593509.023078.csv new file mode 100644 index 00000000..856afd10 --- /dev/null +++ b/GraphHD_v2/level/result1699593509.023078.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.09,0.55,17.78,17.78,FHRR diff --git a/GraphHD_v2/level/result1699593545.857541.csv b/GraphHD_v2/level/result1699593545.857541.csv new file mode 100644 index 00000000..a2b5bfa9 --- /dev/null +++ b/GraphHD_v2/level/result1699593545.857541.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.14,0.66,12.78,12.78,FHRR diff --git a/GraphHD_v2/level/result1699593559.659127.csv b/GraphHD_v2/level/result1699593559.659127.csv new file mode 100644 index 00000000..3d6f7087 --- /dev/null +++ b/GraphHD_v2/level/result1699593559.659127.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.11,0.57,16.67,16.67,FHRR diff --git a/GraphHD_v2/level/result1699593584.1996732.csv b/GraphHD_v2/level/result1699593584.1996732.csv new file mode 100644 index 00000000..7e880c13 --- /dev/null +++ b/GraphHD_v2/level/result1699593584.1996732.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.14,0.5,15.56,15.56,FHRR diff --git a/GraphHD_v2/level/result1699593607.548141.csv b/GraphHD_v2/level/result1699593607.548141.csv new file mode 100644 index 00000000..7e0f0138 --- /dev/null +++ b/GraphHD_v2/level/result1699593607.548141.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.12,0.68,13.33,13.33,FHRR diff --git 
a/GraphHD_v2/level/result1699593722.7090151.csv b/GraphHD_v2/level/result1699593722.7090151.csv new file mode 100644 index 00000000..32c9b0d0 --- /dev/null +++ b/GraphHD_v2/level/result1699593722.7090151.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.1,0.47,15.56,15.56,FHRR diff --git a/GraphHD_v2/level/result1699593776.498823.csv b/GraphHD_v2/level/result1699593776.498823.csv new file mode 100644 index 00000000..194d5740 --- /dev/null +++ b/GraphHD_v2/level/result1699593776.498823.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.06,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593798.7362618.csv b/GraphHD_v2/level/result1699593798.7362618.csv new file mode 100644 index 00000000..194d5740 --- /dev/null +++ b/GraphHD_v2/level/result1699593798.7362618.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.06,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593817.412232.csv b/GraphHD_v2/level/result1699593817.412232.csv new file mode 100644 index 00000000..194d5740 --- /dev/null +++ b/GraphHD_v2/level/result1699593817.412232.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.06,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593833.837172.csv b/GraphHD_v2/level/result1699593833.837172.csv new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699593833.837172.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593850.161013.csv b/GraphHD_v2/level/result1699593850.161013.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699593850.161013.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593902.5936582.csv b/GraphHD_v2/level/result1699593902.5936582.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699593902.5936582.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593928.189329.csv b/GraphHD_v2/level/result1699593928.189329.csv new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699593928.189329.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593938.546769.csv b/GraphHD_v2/level/result1699593938.546769.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699593938.546769.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593957.508517.csv b/GraphHD_v2/level/result1699593957.508517.csv new file mode 100644 index 00000000..67f6ed77 --- /dev/null +++ b/GraphHD_v2/level/result1699593957.508517.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.14,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593977.545095.csv b/GraphHD_v2/level/result1699593977.545095.csv new file mode 100644 index 00000000..ccef7545 --- /dev/null +++ b/GraphHD_v2/level/result1699593977.545095.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA 
+ENZYMES,10000,0.17,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699593984.275619.csv b/GraphHD_v2/level/result1699593984.275619.csv new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699593984.275619.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699594015.327884.csv b/GraphHD_v2/level/result1699594015.327884.csv new file mode 100644 index 00000000..cdd30916 --- /dev/null +++ b/GraphHD_v2/level/result1699594015.327884.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,3.67,1.55,26.11,26.11,FHRR diff --git a/GraphHD_v2/level/result1699594041.001162.csv b/GraphHD_v2/level/result1699594041.001162.csv new file mode 100644 index 00000000..c7305da2 --- /dev/null +++ b/GraphHD_v2/level/result1699594041.001162.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,2.49,1.17,25.0,25.0,FHRR diff --git a/GraphHD_v2/level/result1699594048.0195122.csv b/GraphHD_v2/level/result1699594048.0195122.csv new file mode 100644 index 00000000..dee32089 --- /dev/null +++ b/GraphHD_v2/level/result1699594048.0195122.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,2.25,1.0,23.33,23.33,FHRR diff --git a/GraphHD_v2/level/result1699595171.086411.csv b/GraphHD_v2/level/result1699595171.086411.csv new file mode 100644 index 00000000..82560db0 --- /dev/null +++ b/GraphHD_v2/level/result1699595171.086411.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.05,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595182.878655.csv b/GraphHD_v2/level/result1699595182.878655.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699595182.878655.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595229.028234.csv b/GraphHD_v2/level/result1699595229.028234.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699595229.028234.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595429.294216.csv b/GraphHD_v2/level/result1699595429.294216.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699595429.294216.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595439.047681.csv b/GraphHD_v2/level/result1699595439.047681.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699595439.047681.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595472.576586.csv b/GraphHD_v2/level/result1699595472.576586.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699595472.576586.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595482.579198.csv b/GraphHD_v2/level/result1699595482.579198.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699595482.579198.csv @@ -0,0 +1,2 @@ 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699595524.909657.csv b/GraphHD_v2/level/result1699595524.909657.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699595524.909657.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596259.9128609.csv b/GraphHD_v2/level/result1699596259.9128609.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699596259.9128609.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596287.1179352.csv b/GraphHD_v2/level/result1699596287.1179352.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699596287.1179352.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596316.49761.csv b/GraphHD_v2/level/result1699596316.49761.csv new file mode 100644 index 00000000..c79187db --- /dev/null +++ b/GraphHD_v2/level/result1699596316.49761.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596323.287065.csv b/GraphHD_v2/level/result1699596323.287065.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699596323.287065.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596353.29406.csv b/GraphHD_v2/level/result1699596353.29406.csv new file mode 100644 index 00000000..da7e55a6 --- /dev/null +++ b/GraphHD_v2/level/result1699596353.29406.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.04,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596388.388041.csv b/GraphHD_v2/level/result1699596388.388041.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699596388.388041.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596577.030624.csv b/GraphHD_v2/level/result1699596577.030624.csv new file mode 100644 index 00000000..ad73dd2a --- /dev/null +++ b/GraphHD_v2/level/result1699596577.030624.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699596582.7052438.csv b/GraphHD_v2/level/result1699596582.7052438.csv new file mode 100644 index 00000000..a9023447 --- /dev/null +++ b/GraphHD_v2/level/result1699596582.7052438.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ENZYMES,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699738121.505389.csv b/GraphHD_v2/level/result1699738121.505389.csv new file mode 100644 index 00000000..9390f178 --- /dev/null +++ b/GraphHD_v2/level/result1699738121.505389.csv @@ -0,0 +1,10 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.0,0.0,0.0,0.0,FHRR 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,0.0,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699738295.678188.csv b/GraphHD_v2/level/result1699738295.678188.csv new file mode 100644 index 00000000..916aebbd --- /dev/null +++ b/GraphHD_v2/level/result1699738295.678188.csv @@ -0,0 +1,28 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.01,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ER_MD,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI109,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +KKI,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +OHSU,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +Peking_1,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PROTEINS,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.0,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699762784.2433422.csv b/GraphHD_v2/level/result1699762784.2433422.csv new file mode 100644 index 00000000..15e23756 --- /dev/null +++ b/GraphHD_v2/level/result1699762784.2433422.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699762804.07495.csv b/GraphHD_v2/level/result1699762804.07495.csv new file mode 100644 index 00000000..31834fe8 --- /dev/null +++ b/GraphHD_v2/level/result1699762804.07495.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,1.05,0.51,79.51,79.51,FHRR diff --git a/GraphHD_v2/level/result1699762846.623924.csv b/GraphHD_v2/level/result1699762846.623924.csv new file mode 100644 index 00000000..47a7bd6f --- /dev/null +++ b/GraphHD_v2/level/result1699762846.623924.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,1.32,0.76,76.89,76.89,FHRR diff --git a/GraphHD_v2/level/result1699762925.826139.csv b/GraphHD_v2/level/result1699762925.826139.csv new file mode 100644 index 00000000..0bee8cb2 --- /dev/null +++ b/GraphHD_v2/level/result1699762925.826139.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.71,0.35,80.41,80.41,FHRR diff --git a/GraphHD_v2/level/result1699763155.7063282.csv b/GraphHD_v2/level/result1699763155.7063282.csv new file mode 100644 index 00000000..15e23756 --- /dev/null +++ b/GraphHD_v2/level/result1699763155.7063282.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699763186.573164.csv b/GraphHD_v2/level/result1699763186.573164.csv new file mode 100644 index 00000000..0d96f29d --- /dev/null +++ b/GraphHD_v2/level/result1699763186.573164.csv @@ -0,0 
+1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699763819.651474.csv b/GraphHD_v2/level/result1699763819.651474.csv new file mode 100644 index 00000000..abb511e7 --- /dev/null +++ b/GraphHD_v2/level/result1699763819.651474.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699763839.2594318.csv b/GraphHD_v2/level/result1699763839.2594318.csv new file mode 100644 index 00000000..0d96f29d --- /dev/null +++ b/GraphHD_v2/level/result1699763839.2594318.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699763951.6192908.csv b/GraphHD_v2/level/result1699763951.6192908.csv new file mode 100644 index 00000000..0d96f29d --- /dev/null +++ b/GraphHD_v2/level/result1699763951.6192908.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699764147.784245.csv b/GraphHD_v2/level/result1699764147.784245.csv new file mode 100644 index 00000000..0d96f29d --- /dev/null +++ b/GraphHD_v2/level/result1699764147.784245.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699764159.788817.csv b/GraphHD_v2/level/result1699764159.788817.csv new file mode 100644 index 00000000..15e23756 --- /dev/null +++ b/GraphHD_v2/level/result1699764159.788817.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699764170.1761541.csv b/GraphHD_v2/level/result1699764170.1761541.csv new file mode 100644 index 00000000..abb511e7 --- /dev/null +++ b/GraphHD_v2/level/result1699764170.1761541.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699764220.84833.csv b/GraphHD_v2/level/result1699764220.84833.csv new file mode 100644 index 00000000..abb511e7 --- /dev/null +++ b/GraphHD_v2/level/result1699764220.84833.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699764305.088311.csv b/GraphHD_v2/level/result1699764305.088311.csv new file mode 100644 index 00000000..d5faba8d --- /dev/null +++ b/GraphHD_v2/level/result1699764305.088311.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.79,0.38,63.52,63.52,FHRR diff --git a/GraphHD_v2/level/result1699764392.114508.csv b/GraphHD_v2/level/result1699764392.114508.csv new file mode 100644 index 00000000..95aa55f6 --- /dev/null +++ b/GraphHD_v2/level/result1699764392.114508.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.72,0.35,79.51,79.51,FHRR diff --git a/GraphHD_v2/level/result1699764460.891639.csv b/GraphHD_v2/level/result1699764460.891639.csv new file mode 100644 index 00000000..9ca80773 --- /dev/null +++ b/GraphHD_v2/level/result1699764460.891639.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.89,0.42,80.25,80.25,FHRR diff --git a/GraphHD_v2/level/result1699764552.618688.csv b/GraphHD_v2/level/result1699764552.618688.csv new file mode 100644 index 00000000..76d312d2 --- /dev/null +++ b/GraphHD_v2/level/result1699764552.618688.csv 
@@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.88,0.42,79.26,79.26,FHRR diff --git a/GraphHD_v2/level/result1699764626.042729.csv b/GraphHD_v2/level/result1699764626.042729.csv new file mode 100644 index 00000000..e75e10fd --- /dev/null +++ b/GraphHD_v2/level/result1699764626.042729.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.93,0.48,82.13,82.13,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,3.88,1.67,53.91,53.91,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.15,0.54,67.66,67.66,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,5.49,2.39,55.16,55.16,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.92,0.89,71.54,71.54,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,5.99,2.67,55.76,55.76,FHRR diff --git a/GraphHD_v2/level/result1699765161.837012.csv b/GraphHD_v2/level/result1699765161.837012.csv new file mode 100644 index 00000000..880c49a4 --- /dev/null +++ b/GraphHD_v2/level/result1699765161.837012.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.89,0.43,81.72,81.72,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,3.79,1.67,55.11,55.11,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.16,0.55,68.44,68.44,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,5.57,2.41,52.36,52.36,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.93,0.91,73.61,73.61,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,6.04,2.63,53.98,53.98,FHRR diff --git a/GraphHD_v2/level/result1699765853.11426.csv b/GraphHD_v2/level/result1699765853.11426.csv new file mode 100644 index 00000000..314d03bc --- /dev/null +++ b/GraphHD_v2/level/result1699765853.11426.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.8,0.36,79.14,79.14,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,3.51,1.55,56.41,56.41,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.9,0.44,67.94,67.94,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,5.25,2.3,51.32,51.32,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.53,0.72,71.94,71.94,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,5.67,2.45,59.87,59.87,FHRR diff --git a/GraphHD_v2/level/result1699894465.8592472.csv b/GraphHD_v2/level/result1699894465.8592472.csv new file mode 100644 index 00000000..d287dcae --- /dev/null +++ b/GraphHD_v2/level/result1699894465.8592472.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,1.0,0.48,77.7,77.7,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,3.79,1.61,56.2,56.2,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.24,0.57,69.79,69.79,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,6.75,2.74,53.08,53.08,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,2.38,1.18,70.93,70.93,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,6.44,2.79,56.95,56.95,FHRR diff --git a/GraphHD_v2/level/result1699895812.510516.csv b/GraphHD_v2/level/result1699895812.510516.csv new file mode 100644 index 00000000..7629e12a --- /dev/null +++ 
b/GraphHD_v2/level/result1699895812.510516.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699895929.36012.csv b/GraphHD_v2/level/result1699895929.36012.csv new file mode 100644 index 00000000..7629e12a --- /dev/null +++ b/GraphHD_v2/level/result1699895929.36012.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699896021.45926.csv b/GraphHD_v2/level/result1699896021.45926.csv new file mode 100644 index 00000000..4be8f38f --- /dev/null +++ b/GraphHD_v2/level/result1699896021.45926.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.02,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699896058.510886.csv b/GraphHD_v2/level/result1699896058.510886.csv new file mode 100644 index 00000000..7629e12a --- /dev/null +++ b/GraphHD_v2/level/result1699896058.510886.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.03,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699896171.076137.csv b/GraphHD_v2/level/result1699896171.076137.csv new file mode 100644 index 00000000..d568b9e5 --- /dev/null +++ b/GraphHD_v2/level/result1699896171.076137.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.01,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699896229.802478.csv b/GraphHD_v2/level/result1699896229.802478.csv new file mode 100644 index 00000000..677c636a --- /dev/null +++ b/GraphHD_v2/level/result1699896229.802478.csv @@ -0,0 +1,58 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,0.2,0.03,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR_MD,10000,0.07,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2_MD,10000,0.1,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR_MD,10000,0.04,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +ER_MD,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MCF-7H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MOLT-4,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MOLT-4H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +Mutagenicity,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MUTAG,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI1,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI109,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI-H23,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +NCI-H23H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +OVCAR-8,10000,0.0,0.0,0.0,0.0,FHRR 
+dataset,dimensions,train_time,test_time,accuracy,f1,VSA +OVCAR-8H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +P388,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +P388H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PC-3,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PC-3H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FM,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_FR,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_MM,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +PTC_MR,10000,0.0,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/level/result1699896482.4919229.csv b/GraphHD_v2/level/result1699896482.4919229.csv new file mode 100644 index 00000000..5ab74a79 --- /dev/null +++ b/GraphHD_v2/level/result1699896482.4919229.csv @@ -0,0 +1,20 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SF-295,10000,0.04,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SF-295H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SN12C,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SN12CH,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SW-620,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +SW-620H,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +UACC257,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +UACC257H,10000,0.01,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +Yeast,10000,0.0,0.0,0.0,0.0,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +YeastH,10000,0.0,0.0,0.0,0.0,FHRR diff --git a/GraphHD_v2/list_basic_node2/result1699913335.2692878.csv b/GraphHD_v2/list_basic_node2/result1699913335.2692878.csv new file mode 100644 index 00000000..82dad0a5 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699913335.2692878.csv @@ -0,0 +1 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA diff --git a/GraphHD_v2/list_basic_node2/result1699913351.271691.csv b/GraphHD_v2/list_basic_node2/result1699913351.271691.csv new file mode 100644 index 00000000..8913a230 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699913351.271691.csv @@ -0,0 +1,12 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,3.6,1.41,94.5,94.5,FHRR +BZR,10000,2.16,0.71,77.87,77.87,FHRR +BZR_MD,10000,4.04,1.91,60.87,60.87,FHRR +COX2,10000,1.53,0.8,63.12,63.12,FHRR +COX2_MD,10000,6.25,2.58,48.35,48.35,FHRR +DHFR,10000,2.48,1.13,72.25,72.25,FHRR +DHFR_MD,10000,7.06,3.54,52.54,52.54,FHRR +ER_MD,10000,6.2,3.31,62.69,62.69,FHRR +FRANKENSTEIN,10000,4.74,4.33,61.44,61.44,FHRR +MCF-7,10000,72.68,31.14,78.5,78.5,FHRR +MCF-7H,10000,101.0,46.87,70.98,70.98,FHRR diff --git a/GraphHD_v2/list_basic_node2/result1699914443.615314.csv b/GraphHD_v2/list_basic_node2/result1699914443.615314.csv new file mode 100644 index 00000000..82dad0a5 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699914443.615314.csv @@ -0,0 +1 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA diff --git a/GraphHD_v2/list_basic_node2/result1699914547.9685602.csv b/GraphHD_v2/list_basic_node2/result1699914547.9685602.csv new file mode 100644 index 
00000000..82dad0a5 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699914547.9685602.csv @@ -0,0 +1 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA diff --git a/GraphHD_v2/list_basic_node2/result1699914583.4597728.csv b/GraphHD_v2/list_basic_node2/result1699914583.4597728.csv new file mode 100644 index 00000000..82dad0a5 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699914583.4597728.csv @@ -0,0 +1 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA diff --git a/GraphHD_v2/list_basic_node2/result1699914660.2943099.csv b/GraphHD_v2/list_basic_node2/result1699914660.2943099.csv new file mode 100644 index 00000000..4b3750a0 --- /dev/null +++ b/GraphHD_v2/list_basic_node2/result1699914660.2943099.csv @@ -0,0 +1,29 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +MOLT-4,10000,93.31,44.41,74.76,74.76,FHRR +MOLT-4H,10000,147.44,66.59,69.82,69.82,FHRR +Mutagenicity,10000,9.9,6.32,68.13,68.13,FHRR +MUTAG,10000,0.27,0.15,78.95,78.95,FHRR +NCI1,10000,9.85,4.53,65.45,65.45,FHRR +NCI109,10000,9.52,4.51,64.89,64.89,FHRR +NCI-H23,10000,85.36,40.34,81.69,81.69,FHRR +NCI-H23H,10000,145.81,66.12,73.44,73.44,FHRR +OVCAR-8,10000,82.35,39.76,80.58,80.58,FHRR +OVCAR-8H,10000,144.6,65.45,70.4,70.4,FHRR +P388,10000,71.2,34.87,81.17,81.17,FHRR +P388H,10000,128.39,58.76,74.36,74.36,FHRR +PC-3,10000,57.4,27.54,80.83,80.83,FHRR +PC-3H,10000,100.01,44.83,75.31,75.31,FHRR +PTC_FM,10000,0.37,0.21,58.1,58.1,FHRR +PTC_FR,10000,0.4,0.2,57.55,57.55,FHRR +PTC_MM,10000,0.36,0.19,69.31,69.31,FHRR +PTC_MR,10000,0.38,0.21,51.92,51.92,FHRR +SF-295,10000,82.4,39.88,80.98,80.98,FHRR +SF-295H,10000,142.86,64.57,73.16,73.16,FHRR +SN12C,10000,81.06,38.81,81.1,81.1,FHRR +SN12CH,10000,144.62,65.77,71.46,71.46,FHRR +SW-620,10000,82.96,39.64,78.35,78.35,FHRR +SW-620H,10000,145.85,66.82,69.69,69.69,FHRR +UACC257,10000,82.37,40.06,82.55,82.55,FHRR +UACC257H,10000,140.79,65.45,73.89,73.89,FHRR +Yeast,10000,133.86,65.9,61.5,61.5,FHRR +YeastH,10000,238.76,109.82,64.41,64.41,FHRR diff --git a/GraphHD_v2/metrics/result1699489987.410995.csv b/GraphHD_v2/metrics/result1699489987.410995.csv new file mode 100644 index 00000000..4af0e31a --- /dev/null +++ b/GraphHD_v2/metrics/result1699489987.410995.csv @@ -0,0 +1,108 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.18,0.09,54.29,54.29,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.28,0.15,54.29,54.29,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.16,0.1,57.14,57.14,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.21,0.11,66.67,66.67,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.27,0.14,56.19,56.19,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.31,0.15,55.24,55.24,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,1.86,1.04,56.19,56.19,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,2.01,0.43,58.1,58.1,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PTC_FM,10000,0.34,0.12,57.14,57.14,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.1,0.06,84.21,84.21,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric 
+MUTAG,10000,0.18,0.11,84.21,84.21,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.11,0.06,84.21,84.21,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.15,0.07,87.72,87.72,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.16,0.08,80.7,80.7,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.15,0.08,84.21,84.21,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.33,0.17,84.21,84.21,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.84,0.32,80.7,80.7,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +MUTAG,10000,0.29,0.08,87.72,87.72,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,3.23,1.65,61.56,61.56,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.19,2.66,63.99,63.99,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,3.92,2.12,61.96,61.96,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,5.31,2.7,64.88,64.88,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,6.88,3.1,63.34,63.34,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,6.87,3.33,63.75,63.75,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,17.5,7.46,63.99,63.99,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,14.78,7.28,62.69,62.69,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +NCI1,10000,4.73,2.33,62.21,62.21,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.79,0.37,38.33,38.33,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.86,0.44,25.56,25.56,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,0.82,0.39,17.78,17.78,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.24,0.7,22.78,22.78,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.46,0.68,25.56,25.56,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.41,0.66,23.33,23.33,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,3.16,1.46,31.67,31.67,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,3.93,1.93,22.22,22.22,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +ENZYMES,10000,1.46,0.5,27.22,27.22,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.78,0.79,67.37,67.37,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.91,0.99,70.66,70.66,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,1.82,1.01,70.66,70.66,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric 
+PROTEINS,10000,2.77,1.17,67.37,67.37,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,4.91,1.96,66.17,66.17,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,4.35,2.03,68.86,68.86,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,7.11,9.03,70.06,70.06,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,38.65,14.57,68.86,68.86,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +PROTEINS,10000,2.62,1.36,65.57,65.57,FHRR,harmonic_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,13.78,8.62,70.34,70.34,FHRR,none +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,14.7,7.09,75.71,75.71,FHRR,page_rank +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,18.09,6.54,73.16,73.16,FHRR,degree_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,41.61,34.44,73.73,73.73,FHRR,closeness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,218.13,55.61,74.01,74.01,FHRR,betweenness_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,196.91,53.53,73.73,73.73,FHRR,load_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,79.52,21.22,73.45,73.45,FHRR,subgraph_centrality +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,460.37,188.7,75.14,75.14,FHRR,subgraph_centrality_exp +dataset,dimensions,train_time,test_time,accuracy,f1,VSA,metric +DD,10000,72.85,21.18,73.16,73.16,FHRR,harmonic_centrality diff --git a/GraphHD_v2/node_attr/result1699904378.53636.csv b/GraphHD_v2/node_attr/result1699904378.53636.csv new file mode 100644 index 00000000..766e7a77 --- /dev/null +++ b/GraphHD_v2/node_attr/result1699904378.53636.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,3.18,1.09,74.67,74.67,FHRR diff --git a/GraphHD_v2/node_attr/result1699904412.549875.csv b/GraphHD_v2/node_attr/result1699904412.549875.csv new file mode 100644 index 00000000..3d1a5c9e --- /dev/null +++ b/GraphHD_v2/node_attr/result1699904412.549875.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,1.9,1.02,71.17,71.17,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.86,0.41,81.15,81.15,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.12,0.52,59.57,59.57,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.86,0.87,77.97,77.97,FHRR diff --git a/GraphHD_v2/node_attr/result1699904503.967231.csv b/GraphHD_v2/node_attr/result1699904503.967231.csv new file mode 100644 index 00000000..25781d71 --- /dev/null +++ b/GraphHD_v2/node_attr/result1699904503.967231.csv @@ -0,0 +1,8 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,2.09,1.03,69.17,69.17,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.98,0.6,78.69,78.69,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,2.09,0.6,70.92,70.92,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,1.96,0.88,71.37,71.37,FHRR diff --git a/GraphHD_v2/node_attr/result1699910347.078403.csv b/GraphHD_v2/node_attr/result1699910347.078403.csv new file mode 100644 index 00000000..3ed9d045 --- /dev/null +++ 
b/GraphHD_v2/node_attr/result1699910347.078403.csv @@ -0,0 +1,2 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,4.39,2.19,61.9,61.9,FHRR diff --git a/GraphHD_v2/node_attr/result1699910477.3889751.csv b/GraphHD_v2/node_attr/result1699910477.3889751.csv new file mode 100644 index 00000000..315021ff --- /dev/null +++ b/GraphHD_v2/node_attr/result1699910477.3889751.csv @@ -0,0 +1,10 @@ +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +AIDS,10000,3.38,1.63,93.5,93.5,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +BZR,10000,0.99,0.48,81.97,81.97,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +COX2,10000,1.27,0.6,64.54,64.54,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +DHFR,10000,2.27,1.05,74.01,74.01,FHRR +dataset,dimensions,train_time,test_time,accuracy,f1,VSA +FRANKENSTEIN,10000,7.22,4.32,62.37,62.37,FHRR
diff --git a/examples/graphhd.py b/examples/graphhd.py
index fa86bd08..acba9c24 100644
--- a/examples/graphhd.py
+++ b/examples/graphhd.py
@@ -85,7 +85,7 @@ class Encoder(nn.Module):
    def __init__(self, out_features, size):
        super(Encoder, self).__init__()
        self.out_features = out_features
-        self.node_ids = embeddings.Random(size, out_features)
+        self.node_ids = embeddings.Level(size, out_features, randomness=0.05)
    def forward(self, x):
        pr = pagerank(x)
diff --git a/torchhd/models.py b/torchhd/models.py
index 0f7de6dc..782504b5 100644
--- a/torchhd/models.py
+++ b/torchhd/models.py
@@ -108,6 +108,39 @@ def add(self, input: Tensor, target: Tensor, lr: float = 1.0) -> None:
        """Adds the input vectors scaled by the lr to the target prototype vectors."""
        self.weight.index_add_(0, target, input, alpha=lr)
+    @torch.no_grad()
+    def add_refine(self, input: Tensor, target: Tensor, lr: float = 1.0) -> None:
+        logit = self(input)
+        predx = torch.topk(logit, 2)
+        pred = torch.tensor([predx.indices[0][0]])
+        is_wrong = target != pred
+        alpha = 1 - (abs(predx[0][0][0]) - abs(predx[0][0][1]))
+        self.similarity_sum += logit.max(1).values.item()
+        self.count += 1
+        if self.error_count == 0:
+            val = self.similarity_sum / self.count
+        else:
+            val = self.error_similarity_sum / self.error_count
+        if is_wrong.sum().item() == 0:
+            if logit.max(1).values.item() < val:
+                self.weight.index_add_(0, target, lr * alpha * input)
+            return
+        self.error_count += 1
+        self.error_similarity_sum += logit.max(1).values.item()
+        logit = logit[is_wrong]
+        input = input[is_wrong]
+        target = target[is_wrong]
+        pred = pred[is_wrong]
+        alpha1 = 1.0 - logit.gather(1, target.unsqueeze(1))
+        alpha2 = logit.gather(1, pred.unsqueeze(1)) - 1
+        self.weight.index_add_(0, target, lr * alpha1 * alpha * input)
+        self.weight.index_add_(0, pred, lr * alpha2 * alpha * input)
+    @torch.no_grad()
+    def add_online(self, input: Tensor, target: Tensor, lr: float = 1.0) -> None:
        r"""Only updates the prototype vectors on wrongly predicted inputs.
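Note on the two source changes above, which are the substantive part of this patch: examples/graphhd.py switches the node identity hypervectors from embeddings.Random to embeddings.Level(size, out_features, randomness=0.05), so that nodes with nearby indices (assigned from the PageRank computed in forward) receive correlated rather than independent hypervectors; torchhd/models.py adds a Centroid.add_refine update in which each prototype update is scaled by alpha, one minus the margin between the two highest class similarities, and correct predictions are only reinforced when their similarity falls below a running average of past (error) similarities.

The snippet below is a minimal sketch of how such a model might be driven, not part of the patch. It assumes the Centroid classifier from torchhd.models, an encoder like the one in examples/graphhd.py, and that the running counters used by add_refine (similarity_sum, count, error_similarity_sum, error_count) are initialized by the caller, since this hunk does not show where they are created. Because add_refine indexes only the first row of its predictions, graphs are fed one at a time; dataset constants, loader structure, and the train() helper are illustrative placeholders.

import torch
from torchhd.models import Centroid

DIMENSIONS = 10000
NUM_CLASSES = 6  # e.g. ENZYMES has 6 classes

model = Centroid(DIMENSIONS, NUM_CLASSES)
# add_refine relies on these running statistics; they are assumed to exist,
# so this sketch creates them explicitly before training.
model.similarity_sum = 0.0
model.count = 0
model.error_similarity_sum = 0.0
model.error_count = 0

def train(encoder, train_loader, epochs: int = 1) -> None:
    """Single-sample training loop using the confidence-weighted update."""
    with torch.no_grad():
        for _ in range(epochs):
            for graph, label in train_loader:
                hv = encoder(graph).unsqueeze(0)   # shape (1, DIMENSIONS)
                target = label.view(1)             # class index, shape (1,)
                model.add_refine(hv, target, lr=1.0)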