Commit: benchmark element wise
miguelgfierro committed Jan 23, 2019
1 parent 75dbefb commit 697cc43
Showing 9 changed files with 818 additions and 389 deletions.
1,105 changes: 759 additions & 346 deletions Benchmark_Matrix_Multiplication/Benchmark.ipynb

Large diffs are not rendered by default.
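Since the notebook diff itself is not rendered, the following is a minimal sketch of the kind of element-wise measurement the CSV results below report: the same expression timed once with plain NumPy and once with numexpr. Array sizes, repetition count, and variable names are illustrative assumptions, not the notebook's actual code.

import timeit

import numexpr as ne
import numpy as np

# Five float32 operands, matching the data_type and size columns in the CSVs.
arrays = {name: np.random.rand(1000, 1000).astype(np.float32) for name in "abcde"}
a, b, c, d, e = (arrays[k] for k in "abcde")

# Plain NumPy: every "*" materialises an intermediate array.
t_numpy = timeit.timeit(lambda: a * b * c * d * e, number=10) / 10

# numexpr: the whole expression is compiled and evaluated in one multi-threaded pass.
t_numexpr = timeit.timeit(
    lambda: ne.evaluate("a * b * c * d * e", local_dict=arrays), number=10
) / 10

print(f"numpy: {t_numpy:.6f} s   numexpr: {t_numexpr:.6f} s")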

@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b*c*d*e,1.3581210032856947e-05,0.00046468317585666747,8.282319437143347e-06,0.0028059603900016687
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b*c*d*e,0.0013337169801429158,0.0006419563641427106,0.0007270283627143986,0.00989134470429105
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b*c*d*e,0.4125056517142574,0.06698089989996724,0.2844813708574553,0.464562417428689
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b*c*d*e,4.1152933948573525,0.6377878464287018,2.786389794571213,OOM
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b*c*d*e,47.78345681300016,6.025674656142655,32.410866868571766,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b*c*d*e,1.3314335137142084e-05,0.00046685892700004065,9.486397272855487e-06,0.002813677330002195,7.812552372857421e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b*c*d*e,0.0013523419867142365,0.0006516142775714927,0.0007518953235712615,0.009415570445714495,0.0009352954792858717
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b*c*d*e,0.4178540187142841,0.06475651068571356,0.28613329128568565,0.4414985505714607,0.09511444592857775
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b*c*d*e,4.077511876714327,0.631055496285366,2.8118292300002525,OOM,0.9488148807143132
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b*c*d*e,46.99647830357156,5.077069783285489,37.74534277514327,OOM,OOM
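The new pytorch column added in this file suggests the same expression was also timed on the GPU with PyTorch, with out-of-memory failures recorded as OOM at the largest sizes. A hedged sketch of how such a measurement could be taken; the function name, transfer strategy, and error handling are assumptions, not the notebook's code.

import time

import numpy as np
import torch


def time_pytorch_product(*operands_np, device="cuda"):
    # Time an element-wise product of NumPy operands on the GPU; return "OOM" on failure.
    try:
        tensors = [torch.from_numpy(x).to(device) for x in operands_np]
        torch.cuda.synchronize()
        start = time.perf_counter()
        result = tensors[0]
        for t in tensors[1:]:
            result = result * t
        torch.cuda.synchronize()  # wait for the kernels before stopping the clock
        return time.perf_counter() - start
    except RuntimeError:  # CUDA out-of-memory surfaces as a RuntimeError
        return "OOM"


operands = [np.random.rand(10000, 10000).astype(np.float32) for _ in range(5)]
print(time_pytorch_product(*operands))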
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b*c,6.4124899871447785e-06,0.00041419081571439164,5.187748427147848e-06,0.0018743201149999161
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b*c,0.0007116139951431251,0.0006108094447141151,0.0005290857704286671,0.006705413462857125
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b*c,0.25532521299984573,0.04212911611424975,0.21453310000002343,0.34459062757170095
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b*c,2.504560864142799,0.40079889657187906,2.080107593714274,3.4073408494288224
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b*c,25.455446619571376,3.727235563999784,20.779519485999895,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b*c,6.464026218571754e-06,0.00044929349028578666,5.676793847140808e-06,0.0018813094286867585,7.77269639428596e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b*c,0.0007129314922857313,0.0006264775219999916,0.0005297740238573689,0.006384324315716055,0.0009455658921424402
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b*c,0.2568606435710795,0.04384491037140573,0.20798630542867613,0.3228037311429424,0.09466351931430057
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b*c,2.5300688950002432,0.4067240268571238,2.0737399060002906,OOM,0.9416879449997525
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b*c,25.632265304714174,3.8640156865712925,20.689618044857134,OOM,OOM
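For the numba_gpu column a custom CUDA kernel is presumably compiled and launched through Numba. A minimal sketch of such a kernel for the three-operand product a*b*c timed in this file; the kernel name, grid, and block sizes are assumptions.

import math

import numpy as np
from numba import cuda


@cuda.jit
def multiply_kernel(a, b, c, out):
    # One thread per element: out = a * b * c.
    i, j = cuda.grid(2)
    if i < out.shape[0] and j < out.shape[1]:
        out[i, j] = a[i, j] * b[i, j] * c[i, j]


size1, size2 = 1000, 1000
host = [np.random.rand(size1, size2).astype(np.float32) for _ in range(3)]
d_a, d_b, d_c = (cuda.to_device(x) for x in host)
d_out = cuda.device_array((size1, size2), dtype=np.float32)

threads = (16, 16)
blocks = (math.ceil(size1 / threads[0]), math.ceil(size2 / threads[1]))
multiply_kernel[blocks, threads](d_a, d_b, d_c, d_out)
cuda.synchronize()  # make sure the kernel has finished before reading the result
result = d_out.copy_to_host()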
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b,3.045885471429496e-06,0.0004575765387142902,3.0989484514287014e-06,0.0014751817551428498,8.01050435856983e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b,0.00039699770428569536,0.0005653649115713182,0.0004287809088571391,0.004793813258571131,0.0009438266607142265
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b,0.17558790781430228,0.03153449689999823,0.1770340029428488,0.26488750714276776,0.09466861130000066
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b,1.7425319677143176,0.28743536971426564,1.7483348917143562,2.673609233142867,0.9540179944286008
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b,17.41499249328581,2.741685099142939,17.431798834142814,OOM,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_paral,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*b,2.987131757134713e-06,0.0004546850882855194,3.0781821314199726e-06,2.1711546157062653e-05,0.0014689059048573004,8.485092691423363e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*b,0.0003994075638573641,0.0005509132387134222,0.0004276636307144404,9.28257201714067e-05,0.005227682087133871,0.0009523596867142519
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*b,0.17534522141426612,0.03154633504278276,0.17578468248565124,0.03082487477139304,0.2837231542866253,0.1007043093856607
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*b,1.7354448185713929,0.2944207122860202,1.7409478180000275,0.28915328571331755,2.828428289143111,0.930100245142447
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*b,17.45532528000019,2.847748285856921,17.439471589857153,2.8781361737144056,OOM,OOM
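This file adds a numba_paral column next to numba_cpu, which points to a parallel Numba variant. A minimal sketch of what that kernel could look like, assuming @njit(parallel=True) with an explicit prange loop; names and sizes are illustrative.

import numpy as np
from numba import njit, prange


@njit(parallel=True)
def multiply_parallel(a, b):
    # Element-wise a * b with the outer loop split across CPU threads.
    out = np.empty_like(a)
    for i in prange(a.shape[0]):
        for j in range(a.shape[1]):
            out[i, j] = a[i, j] * b[i, j]
    return out


a = np.random.rand(10000, 10000).astype(np.float32)
b = np.random.rand(10000, 10000).astype(np.float32)
multiply_parallel(a, b)  # the first call also pays the JIT compilation cost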
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100,100,a*b,1.9991034942859187e-06,0.00045739563228575466,1.8972129631427898e-06,0.0014672904286336624,8.545400003510752e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,1000,1000,a*b,0.00021265959385716217,0.0005932952708571001,0.00020811603614288287,0.004303231087143168,0.0005770849582856953
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,10000,10000,a*b,0.08859053924285751,0.02164836238571622,0.08823964820000714,0.13688202329999513,0.046876007085708285
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100000,10000,a*b,0.9008962752856989,0.18334504785713893,0.8878276047143377,1.321105720428607,0.47380919914287134
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100000,100000,a*b,8.931364757714293,1.7544247782857383,8.775939682857175,OOM,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_paral,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100,100,a*b,2.039115792852369e-06,0.0004643989202855404,1.8989987715706646e-06,2.0655061999942907e-05,0.0013705184283026028,8.778157072291444e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,1000,1000,a*b,0.00021486445414224624,0.0005826712047138635,0.00021471793271380843,6.545712704298368e-05,0.004634505211428664,0.0005904582495708643
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,10000,10000,a*b,0.0914265518427523,0.021019129699925542,0.08813348357148894,0.014700855225715454,0.15000161921431884,0.05372178914289439
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100000,10000,a*b,0.8948549364291206,0.16324322308568556,0.932832152714192,0.11516414079997049,1.4424943634289125,0.5205061795729437
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.int16'>,100000,100000,a*b,8.982922411000411,1.751220227999251,8.841003321142384,1.3662953261425304,OOM,OOM
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*exp(b)*sin(c),0.00021543800671471608,0.00047155348785729855,0.00024377982014240322,0.0018910060882855238
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*exp(b)*sin(c),0.021249865214278022,0.001345897794571491,0.02441082470000505,0.006708680931434563
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*exp(b)*sin(c),2.394257359285153,0.09868702881425893,2.6065998092856586,0.34488680914312553
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*exp(b)*sin(c),24.050353823856703,0.9652101279999832,25.900560946428023,3.435679279999801
24,440.9097557067871,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*exp(b)*sin(c),264.15686427157146,9.863348285000288,274.2337177021431,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_paral,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*exp(b)*sin(c),0.00021517495485698287,0.00047714401842832943,0.00024692697157141605,4.486216859996993e-05,0.0019899224214277637,0.00017381985630241355
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*exp(b)*sin(c),0.021383786285722246,0.0013720472614285038,0.02445581019996358,0.0011566014062856474,0.007350393672854157,0.0014644619665731234
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*exp(b)*sin(c),2.3831087324282896,0.10039842750000909,2.5772207735722725,0.11285294645713294,0.3556431357142823,0.14903337714297646
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*exp(b)*sin(c),23.818135832286185,1.0197124159998825,25.803186465571862,1.1209558734273222,OOM,OOM
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*exp(b)*sin(c),238.69038650842828,9.864430603572796,258.55260184785794,11.205122575571295,OOM,OOM
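The a*exp(b)*sin(c) rows show the widest gap between NumPy and numexpr (roughly 239 s vs 10 s at the largest size), which is consistent with numexpr evaluating transcendental functions inside its own multi-threaded virtual machine without materialising intermediates. A hedged one-liner; pinning the thread count to the machine's 24 processors is an assumption.

import numexpr as ne
import numpy as np

a, b, c = (np.random.rand(10000, 10000).astype(np.float32) for _ in range(3))

ne.set_num_threads(24)  # the n_processors column reports 24 cores
result = ne.evaluate("a * exp(b) * sin(c)")  # exp/sin evaluated in numexpr's threaded VM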
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*exp(b),2.982013507142775e-06,0.0004321573989999966,3.0786444985713905e-06,0.0014421546107142344,7.769559442858086e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*exp(b),0.00039744053457135516,0.0005893131675714878,0.00042798026814281395,0.004855860305714747,0.000938265726285798
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*exp(b),0.17511989802856728,0.031773674457151305,0.17426568362857323,0.26353477399996855,0.09535574244285791
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*exp(b),1.738480907428636,0.28479482242866133,1.7474476704285604,2.6887384551427465,0.9571765117142864
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*exp(b),17.508171226571513,2.8324364292856052,17.42119853099991,OOM,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_paral,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*exp(b),8.917235815715685e-05,0.00046339296314337324,0.00010645946105713457,3.1168423499963997e-05,0.0014637198821422187,0.00010251934371418819
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*exp(b),0.008706716332864224,0.0008437514447146636,0.010515943800003567,0.0005232509011433909,0.005160149144285242,0.0009690882581427494
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*exp(b),1.0017973611434823,0.04764318785710202,1.1454509152858268,0.051388816214239345,0.2868888848575547,0.10265677987138458
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*exp(b),10.033744361428294,0.4521159338567356,11.474778997713916,0.5055008477143669,2.8392534427153544,OOM
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*exp(b),100.23663710071352,4.384261509571453,114.72585597471387,5.04238381214392,OOM,OOM
@@ -1,6 +1,6 @@
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*sin(b),3.0429509814270468e-06,0.00046869145928550484,3.085531941429248e-06,0.001448389068999989,7.831865549999618e-05
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*sin(b),0.0004007217732857628,0.0005562703384286059,0.0004281853741428806,0.004788380401428835,0.0009465599031429
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*sin(b),0.175495497357146,0.03155957509999488,0.172847165528544,0.25844179857163646,0.09418561889999962
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*sin(b),1.7254022387145855,0.28565062599994107,1.7560810279998935,2.663610499285564,0.9563363498572082
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*sin(b),17.43876769014293,2.7671249662858566,17.360332666428544,OOM,OOM
n_processors,cpu_memory,gpu_name,gpu_memory,data_type,size1,size2,operation,numpy,numexpr,numba_cpu,numba_paral,numba_gpu,pytorch
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100,100,a*sin(b),8.316231904287373e-05,0.0004706603919995749,9.897592167144466e-05,3.065217034283186e-05,0.0014532517035711083,0.0001006364071428834
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,1000,1000,a*sin(b),0.008509979234290118,0.0008002521891425463,0.01075968549714162,0.0005327208404284778,0.005180610324283147,0.0009662554287138586
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,10000,10000,a*sin(b),0.9812918034294853,0.04574554689991471,1.1744664499991424,0.05176699802858431,0.28607839371501803,0.10084118415720046
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,10000,a*sin(b),9.929281715857153,0.427062691142055,11.722497981714175,0.5094462405717682,2.829140944143416,OOM
24,440.90975189208984,Tesla V100-PCIE-16GB,15.78173828125,<class 'numpy.float32'>,100000,100000,a*sin(b),98.50450994314295,4.18694622242817,117.06101147671454,5.07823613628664,OOM,OOM
18 changes: 17 additions & 1 deletion Benchmark_Matrix_Multiplication/utils.py
@@ -172,4 +172,20 @@ class AttributeDict(dict):
source: https://stackoverflow.com/a/5021467
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__


def clear_memory_all_gpus():
"""Clear memory of all GPUs.
Examples:
>>> clear_memory_all_gpus()
No CUDA available
"""
try:
for gpu in cuda.gpus:
with gpu:
cuda.current_context().deallocations.clear()
except CudaSupportError:
print("No CUDA available")

