Commit

Merge pull request GoogleCloudPlatform#3083 from SMU-ATT-Center-for-Virtualization:netperf_aggregate

PiperOrigin-RevId: 393386857
copybara-github committed Aug 27, 2021
2 parents 073645f + 397557a commit 4d868cb
Showing 4 changed files with 159 additions and 38 deletions.
94 changes: 93 additions & 1 deletion perfkitbenchmarker/data/netperf.patch
@@ -307,7 +307,8 @@ diff -u -r netperf-2.6.0/doc/examples/runemomniaggdemo.sh netperf-2.6.0-patch/do
 # net.core.[r|w]mem_max are sufficiently large
 -DO_BIDIR=1;
 +DO_BIDIR=0;
- DO_RRAGG=1;
+-DO_RRAGG=1;
++DO_RRAGG=0;
 -DO_RR=1;
 -DO_ANCILLARY=1;
 +DO_RR=0;
@@ -339,3 +340,94 @@ diff -u -r netperf-2.7.0/config.guess netperf-2.7.0-patch/config.guess
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;

diff -u -r netperf-2.7.0/doc/examples/post_proc.py netperf-2.7.0-patch/doc/examples/post_proc.py
--- netperf-2.7.0/doc/examples/post_proc.py 2015-07-20 12:39:35.000000000 -0500
+++ netperf-2.7.0-patch/doc/examples/post_proc.py 2021-07-16 10:22:34.642546090 -0500
@@ -125,7 +125,7 @@
ksink[key] += float(result[0])
else:
if result[0]:
- print "Key %d not in ksink" % key
+ print("Key %d not in ksink" % key)

def process_result(basename, raw_results, end_time, ksink):
first_result = True
@@ -209,7 +209,7 @@
try:
update_rrd(basename,interim_result,interim_end)
except Exception as e:
- print "Update to %s with %s at %s failed with %s" % (basename,interim_result,interim_end,e)
+ print("Update to %s with %s at %s failed with %s" % (basename,interim_result,interim_end,e))
have_result = False
had_results = True

@@ -221,7 +221,7 @@
return 0, 0

def process_result_files(prefix,start_time,end_time,ksink):
- print "Prefix is %s" % prefix
+ print("Prefix is %s" % prefix)
min_timestamp = 9999999999.9
results_list = glob.glob(prefix+"*.out")

@@ -254,13 +254,13 @@
# find that key in the kitchen sink, we will use the value to
# update the overall rrd.
prevkey = -1
- for key in xrange(int(start_time),int(end_time)+1):
+ for key in range(int(start_time),int(end_time)+1):
if key in ksink:
try:
update_rrd(overall,ksink[key],key)
prevkey = key
except Exception as e:
- print "Update to %s failed for %d, previous %d %s" % (overall, key, prevkey, e)
+ print("Update to %s failed for %d, previous %d %s" % (overall, key, prevkey, e))

def overall_min_max_avg(prefix,start_time,end_time,intervals):

@@ -312,7 +312,7 @@
imax = float(result[2].strip('"'))
results_list.append((iavg, imin, imax, start, end))

- for time in xrange(start,end+1):
+ for time in range(start,end+1):
rrdtool.update(prefix + "_intervals.rrd",
'%d:%f:%f:%f' % (time, iavg, imin, imax))
if iavg > max_average:
@@ -444,13 +444,13 @@
# the time being I will preallocate the entire dict in one fell
# swoop until I can modify add_to_ksink() accordingly
length = int(end_time + 1) - int(start_time)
- ksink=dict(zip(xrange(int(start_time),
+ ksink=dict(zip(range(int(start_time),
int(end_time)+1),
[0.0] * length))

min_timestamp = process_result_files(prefix,start_time,end_time,ksink)
if min_timestamp == 9999999999.9:
- print "There were no valid results for this prefix!"
+ print("There were no valid results for this prefix!")
exit()

# print "Min timestamp for %s is %s start time is %s end_time is %s" % (prefix,min_timestamp,start_time,end_time)
@@ -471,12 +471,12 @@
annotation=args.annotation,override=args.title)

units, multiplier, direction = units_et_al_by_prefix(prefix)
- print "Average of peak interval is %.3f %s from %d to %d" % (results_list[0][0] * float(multiplier), units, peak_start, peak_end)
- print "Minimum of peak interval is %.3f %s from %d to %d" % (peak_minimum * float(multiplier), units, peak_start, peak_end)
- print "Maximum of peak interval is %.3f %s from %d to %d" % (peak_maximum * float(multiplier), units, peak_start, peak_end)
+ print("Average of peak interval is %.3f %s from %d to %d" % (results_list[0][0] * float(multiplier), units, peak_start, peak_end))
+ print("Minimum of peak interval is %.3f %s from %d to %d" % (peak_minimum * float(multiplier), units, peak_start, peak_end))
+ print("Maximum of peak interval is %.3f %s from %d to %d" % (peak_maximum * float(multiplier), units, peak_start, peak_end))

if args.intervals:
for id, interval in enumerate(results_list[1:]):
- print "Average of interval %d is %.3f %s from %d to %d" % (id, interval[0] * float(multiplier), units, interval[3], interval[4])
- print "Minimum of interval %d is %.3f %s from %d to %d" % (id, interval[1] * float(multiplier), units, interval[3], interval[4])
- print "Maximum of interval %d is %.3f %s from %d to %d" % (id, interval[2] * float(multiplier), units, interval[3], interval[4])
+ print("Average of interval %d is %.3f %s from %d to %d" % (id, interval[0] * float(multiplier), units, interval[3], interval[4]))
+ print("Minimum of interval %d is %.3f %s from %d to %d" % (id, interval[1] * float(multiplier), units, interval[3], interval[4]))
+ print("Maximum of interval %d is %.3f %s from %d to %d" % (id, interval[2] * float(multiplier), units, interval[3], interval[4]))
@@ -1,4 +1,4 @@
-# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
+# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,25 +18,38 @@
https://hewlettpackard.github.io/netperf/doc/netperf.html
manpage: http://manpages.ubuntu.com/manpages/maverick/man1/netperf.1.html
Runs UDP_RR in script between one source machine and two target machines
to test packets per second
Runs multiple tests in script between one source machine and two target machines
to test packets per second and inbound and outbound throughput
"""

import collections
import logging
import os
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf

ALL_BENCHMARKS = ['STREAM', 'MAERTS', 'BIDIR', 'RRAGG']
flags.DEFINE_list('netperf_aggregate_benchmarks', ALL_BENCHMARKS,
'The netperf aggregate benchmark(s) to run. '
'STREAM measures outbound throughput. '
'MAERTS measures inbound throughput. '
'RRAGG measures packets per second. '
'BIDIR measures bidirectional bulk throughput.')
flags.register_validator(
'netperf_aggregate_benchmarks',
lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'netperf_pps'
BENCHMARK_NAME = 'netperf_aggregate'
BENCHMARK_CONFIG = """
netperf_pps:
description: test packets per second performance using netperf
netperf_aggregate:
description: simultaneous netperf to multiple endpoints
vm_groups:
servers:
vm_spec: *default_single_core
@@ -61,10 +74,19 @@ def GetConfig(user_config):
def PrepareNetperfAggregate(vm):
"""Installs netperf on a single vm."""

vm.Install('python3')
vm.Install('pip3')
vm.RemoteCommand('sudo pip3 install --upgrade pip')
vm.Install('texinfo')
vm.Install('python_rrdtool')
vm.Install('netperf')

# Enable test types in the script runemomniaggdemo.sh
for benchmark in FLAGS.netperf_aggregate_benchmarks:
vm.RemoteCommand(
f'sed -i "s/DO_{benchmark}=0;/DO_{benchmark}=1;/g" /opt/pkb/netperf-netperf-2.7.0/doc/examples/runemomniaggdemo.sh'
)

port_end = PORT_START

if vm_util.ShouldRunOnExternalIpAddress():
@@ -75,9 +97,6 @@ def PrepareNetperfAggregate(vm):
netserver_path=netperf.NETSERVER_PATH)
vm.RemoteCommand(netserver_cmd)

remote_path = netperf.NETPERF_EXAMPLE_DIR + REMOTE_SCRIPT
vm.RemoteCommand('chmod +x %s' % (remote_path))


def Prepare(benchmark_spec):
"""Install netperf on the target vm.
@@ -88,14 +107,20 @@
"""

vms = benchmark_spec.vms
client_vm = benchmark_spec.vm_groups['client'][0]

vm_util.RunThreaded(PrepareNetperfAggregate, vms)
client_vm.RemoteCommand(
f'sudo chmod 777 {os.path.join(netperf.NETPERF_EXAMPLE_DIR, REMOTE_SCRIPT)}'
)


def ParseNetperfAggregateOutput(stdout):
def ParseNetperfAggregateOutput(stdout, test_type):
"""Parses the stdout of a single netperf process.
Args:
stdout: the stdout of the netperf process
test_type: the type of test
Returns:
A tuple containing (throughput_sample, latency_samples, latency_histogram)
@@ -110,7 +135,7 @@ def ParseNetperfAggregateOutput(stdout):
match = re.search('peak interval', line)
if match:
line_split = line.split()
metric = line_split[0] + ' ' + line_split[6]
metric = f'{test_type} {line_split[0]} {line_split[6]}'
value = float(line_split[5])
unit = line_split[6]
aggregate_samples.append(sample.Sample(
@@ -149,7 +174,6 @@ def RunNetperfAggregate(vm, server_ips):
vm.RemoteCommand(f"echo 'NUM_REMOTE_HOSTS={len(server_ips)}' >> "
f"{netperf.NETPERF_EXAMPLE_DIR}/remote_hosts")

# allow script to be executed and run script
vm.RemoteCommand(
f'cd {netperf.NETPERF_EXAMPLE_DIR} && '
'export PATH=$PATH:. && '
@@ -160,24 +184,25 @@
login_shell=False,
timeout=1200)

# print out netperf_tps.log to log
stdout_1, stderr_1 = vm.RemoteCommand(
f'cat {netperf.NETPERF_EXAMPLE_DIR}/netperf_tps.log',
ignore_failure=True,
should_log=True,
login_shell=False,
timeout=1200)

logging.info(stdout_1)
logging.info(stderr_1)

# do post processing step
proc_stdout, _ = vm.RemoteCommand(
f'cd {netperf.NETPERF_EXAMPLE_DIR} && ./post_proc.py '
'--intervals netperf_tps.log',
ignore_failure=True)

samples = ParseNetperfAggregateOutput(proc_stdout)
interval_naming = collections.namedtuple(
'IntervalNaming', 'output_file parse_name')
benchmark_interval_mapping = {
'STREAM': interval_naming('netperf_outbound', 'Outbound'),
'MAERTS': interval_naming('netperf_inbound', 'Inbound'),
'RRAGG': interval_naming('netperf_tps', 'Request/Response Aggregate'),
'BIDIR': interval_naming('netperf_bidirectional', 'Bidirectional')
}

samples = []
for benchmark in FLAGS.netperf_aggregate_benchmarks:
output_file = benchmark_interval_mapping[benchmark].output_file
parse_name = benchmark_interval_mapping[benchmark].parse_name
proc_stdout, _ = vm.RemoteCommand(
f'cd {netperf.NETPERF_EXAMPLE_DIR} && python3 post_proc.py '
f'--intervals {output_file}.log',
ignore_failure=False)
vm.RemoteCommand(f'cd {netperf.NETPERF_EXAMPLE_DIR} && rm {output_file}*')
samples.extend(ParseNetperfAggregateOutput(proc_stdout, f'{parse_name}'))

return samples

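For reference, the parsing step in this file keys off post_proc.py's "peak interval" summary lines. A minimal standalone sketch of that logic, assuming output of the form printed by the patched script; the sample value and unit are illustrative:

import re

def parse_peak_interval(stdout, test_type):
  """Sketch of the 'peak interval' parsing done by ParseNetperfAggregateOutput."""
  parsed = []
  for line in stdout.splitlines():
    if re.search('peak interval', line):
      line_split = line.split()
      # e.g. "Average of peak interval is 940.123 Mbit/s from 5 to 25":
      # [0] is Average/Minimum/Maximum, [5] the value, [6] the unit.
      parsed.append((f'{test_type} {line_split[0]} {line_split[6]}',
                     float(line_split[5]), line_split[6]))
  return parsed

# parse_peak_interval('Average of peak interval is 940.123 Mbit/s from 5 to 25',
#                     'Outbound')
# -> [('Outbound Average Mbit/s', 940.123, 'Mbit/s')]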
14 changes: 9 additions & 5 deletions perfkitbenchmarker/linux_packages/python_rrdtool.py
@@ -1,4 +1,4 @@
-# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
+# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,10 +17,14 @@


 def YumInstall(vm):
-  """Installs build tools on the VM."""
-  vm.InstallPackages('python-rrdtool')
+  """Installs python rrdtool on the VM."""
+  vm.InstallPackages('rrdtool')
+  vm.RemoteCommand('sudo dnf --enablerepo=powertools install -y rrdtool-devel')
+  vm.RemoteCommand('sudo pip3 install rrdtool')


 def AptInstall(vm):
-  """Installs build tools on the VM."""
-  vm.InstallPackages('python-rrdtool')
+  """Installs python rrdtool on the VM."""
+  vm.InstallPackages('librrd-dev')
+  vm.InstallPackages('libpython3-dev')
+  vm.RemoteCommand('sudo pip install rrdtool')
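The pip rrdtool module installed here is the binding the patched post_proc.py imports. A minimal sketch of the call pattern it relies on; the data-source names and archive definition are illustrative, and only the update format string comes from the patch:

import rrdtool  # the binding installed via pip above

rrdtool.create('example_intervals.rrd',
               '--start', '1000000000', '--step', '1',
               'DS:avg:GAUGE:2:U:U',  # illustrative data sources
               'DS:min:GAUGE:2:U:U',
               'DS:max:GAUGE:2:U:U',
               'RRA:AVERAGE:0.5:1:600')
# Same format as the patched post_proc.py: '%d:%f:%f:%f' % (time, avg, min, max)
rrdtool.update('example_intervals.rrd', '%d:%f:%f:%f' % (1000000001, 1.0, 0.5, 2.0))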
4 changes: 2 additions & 2 deletions perfkitbenchmarker/linux_packages/texinfo.py
@@ -16,9 +16,9 @@
 """Module containing build tools installation and cleanup functions."""


-def YumInstall(vm):
+def YumInstall(_):
   """Installs build tools on the VM."""
-  vm.InstallPackages('texinfo')
+  pass


 def AptInstall(vm):
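Taken together: the netperf.patch change defaults every DO_* toggle in runemomniaggdemo.sh to 0, and PrepareNetperfAggregate re-enables only the tests selected by --netperf_aggregate_benchmarks. A sketch of that selection, using the sed pattern and script path from the diff above:

SCRIPT = '/opt/pkb/netperf-netperf-2.7.0/doc/examples/runemomniaggdemo.sh'
ALL_BENCHMARKS = ['STREAM', 'MAERTS', 'BIDIR', 'RRAGG']

def enable_commands(benchmarks):
  """Returns the sed commands PrepareNetperfAggregate issues on each VM."""
  # Mirrors the flag validator: non-empty and a subset of ALL_BENCHMARKS.
  assert benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS)
  return [f'sed -i "s/DO_{b}=0;/DO_{b}=1;/g" {SCRIPT}' for b in benchmarks]

# enable_commands(['STREAM', 'RRAGG']) -> two sed invocations flipping
# DO_STREAM and DO_RRAGG from 0 to 1 before the script runs.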
