Update FEMU scripts
huaicheng committed Mar 7, 2018
1 parent 7f19cc0 commit 20d5a46
Showing 5 changed files with 170 additions and 42 deletions.
21 changes: 21 additions & 0 deletions femu-scripts/femu-copy-scripts.sh
@@ -0,0 +1,21 @@
#!/bin/bash
# Huaicheng <[email protected]>
# Copy necessary scripts for running FEMU

FSD="../femu-scripts"

CPL=(pkgdep.sh femu-compile.sh run-whitebox.sh run-blackbox.sh run-nossd.sh pin.sh ftk vssd1.conf)

echo ""
echo "==> Copying following FEMU script to current directory:"
for f in "${CPL[@]}"
do
if [[ ! -e $FSD/$f ]]; then
echo "Make sure you are under build-femu/ directory!"
exit
fi
cp -r $FSD/$f . && echo " --> $f"
done
echo "Done!"
echo ""

9 changes: 7 additions & 2 deletions femu-scripts/pin.sh
@@ -1,8 +1,13 @@
 #!/bin/bash
 # Huaicheng Li <[email protected]>
 # pin vcpu and qemu main thread to certain set of physical CPUs
+#
+
+NRCPUS="$(cat /proc/cpuinfo | grep "vendor_id" | wc -l)"
+
 # pin vcpus (use at most 36 pCPUs)
-sudo ./ftk/qmp-vcpu-pin -s ./qmp-sock $(seq 8 23) #$(seq 30 47) $(seq 24 29)
+sudo ./ftk/qmp-vcpu-pin -s ./qmp-sock $(seq 0 $NRCPUS) #$(seq 30 47) $(seq 24 29)
+
 # pin main thread to the rest of pCPUs
 qemu_pid=$(ps -ef | grep qemu | grep -v grep | tail -n 1 | awk '{print $2}')
-sudo taskset -cp 1-7 ${qemu_pid}
+sudo taskset -cp 1-$NRCPUS ${qemu_pid}
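
A quick way to confirm the pinning took effect (a sketch, not part of the commit; it assumes the FEMU VM is the only qemu-system-x86_64 process on the host):

qemu_pid=$(pgrep -f qemu-system-x86_64 | head -n 1)
for tid in /proc/${qemu_pid}/task/*; do
    taskset -cp "$(basename "$tid")"      # print the current affinity list of each QEMU thread
done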
88 changes: 88 additions & 0 deletions femu-scripts/run-blackbox.sh
@@ -0,0 +1,88 @@
#!/bin/bash
# Huaicheng Li <[email protected]>
# Run VM with FEMU support: FEMU as a black-box SSD (FTL managed by the device)

# image directory
IMGDIR=$HOME/images
# virtual machine disk image
OSIMGF=$IMGDIR/u14s.qcow2
# virtual NVMe disk image
NVMEIMGF=$IMGDIR/vssd1.raw
# virtual NVMe disk size: 1GB
NVMEIMGSZ=1G

# every time we create a new SSD image file
#sudo rm -rf $IMGDIR/vssd1.raw

if [[ ! -e "$OSIMGF" ]]; then
echo ""
echo "VM disk image couldn't be found ..."
echo "Please prepare a usable VM image and place it as $OSIMGF"
echo "Once VM disk image is ready, please rerun this script again"
echo ""
exit
fi

# Please match the image file size with the emulated SSD size in vssd1.conf file
[[ ! -e $NVMEIMGF ]] && ./qemu-img create -f raw $NVMEIMGF $NVMEIMGSZ

# huge page related settings
#echo 25000 | sudo tee /proc/sys/vm/nr_hugepages

#[[ ! -d /dev/hugepages2M ]] && sudo mkdir /dev/hugepages2M && sudo mount -t hugetlbfs none /dev/hugepages2M -o pagesize=2M


# Useful options you may want to further try:
#-object iothread,id=iothread0 \
#-device virtio-blk-pci,iothread=iothread0,drive=id0 \
#-nographic \
#-device nvme,drive=id0,serial=serial0,id=nvme0 \
#-kernel /home/huaicheng/git/linux/arch/x86_64/boot/bzImage \
#-append "root=/dev/vda1 console=ttyS0,115200n8 console=tty0" \
#-virtfs local,path=/home/huaicheng/share/,security_model=passthrough,mount_tag=host_share \

# Note: must come before all other QEMU options!
#-trace events=/tmp/events \
#-object memory-backend-file,id=mem1,size=8G,mem-path=/dev/hugepages2M \
#-device pc-dimm,id=dimm1,memdev=mem1 \
#-device virtio-scsi-pci,id=scsi1 \
#-device scsi-hd,drive=hd1 \
#-drive file=$IMGDIR/vmdata.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd1 \


sudo x86_64-softmmu/qemu-system-x86_64 \
-name "FEMU-blackbox-SSD" \
-enable-kvm \
-cpu host \
-smp 4 \
-m 4G \
-device virtio-scsi-pci,id=scsi0 \
-device scsi-hd,drive=hd0 \
-drive file=$OSIMGF,if=none,aio=native,cache=none,format=qcow2,id=hd0 \
-drive file=$NVMEIMGF,if=none,aio=threads,format=raw,id=id0 \
-device nvme,femu_mode=1,drive=id0,serial=serial0,id=nvme0 \
-net user,hostfwd=tcp::8080-:22 \
-net nic,model=virtio \
-nographic \
-qmp unix:./qmp-sock,server,nowait
#-object iothread,id=iothread0 \
#-display none \
#-nographic \
#-monitor stdio \
#-s -S \
#

#sleep 10

#
# Please manually run the following commands for better FEMU performance/accuracy
#

#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
#sshsim "echo 0 | sudo tee /sys/kernel/debug/tracing/tracing_on"

echo "VM is up, enjoy it :)"

wait
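
Once the guest has booted, it can be reached through the user-mode port forward set up above (hostfwd=tcp::8080-:22). A sketch, with <user> standing in for whatever account exists in the VM image:

ssh -p 8080 <user>@localhost              # host side: SSH into the guest on the forwarded port

# guest side: the emulated black-box SSD should show up as an ordinary NVMe drive
lsblk
dmesg | grep -i nvme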
46 changes: 29 additions & 17 deletions femu-scripts/wcc-run.sh → femu-scripts/run-nossd.sh
@@ -1,30 +1,38 @@
#!/bin/bash
# Huaicheng Li <[email protected]>
# Run VM with FEMU support
# Run VM with FEMU support: FEMU as an NVMe device with no SSD emulation (no-SSD mode)

# image directory
IMGDIR=$HOME/images

is_mounted=$(mount | grep "/mnt/tmpfs")

if [[ $is_mounted == "" ]]; then
sudo mkdir -p /mnt/tmpfs
# huge=always
#sudo mount -t tmpfs -o size=4G,huge=always tmpfs /mnt/tmpfs
# virtual machine disk image
OSIMGF=$IMGDIR/u14s.qcow2
# virtual NVMe disk image
NVMEIMGF=$IMGDIR/vssd1.raw
# virtual NVMe disk size: 1GB
NVMEIMGSZ=1G

# every time we create a new SSD image file
#sudo rm -rf $IMGDIR/vssd1.raw

if [[ ! -e "$OSIMGF" ]]; then
echo ""
echo "VM disk image couldn't be found ..."
echo "Please prepare a usable VM image and place it as $OSIMGF"
echo "Once VM disk image is ready, please rerun this script again"
echo ""
exit
fi


# every time we run a new SSD
sudo rm -rf $IMGDIR/vssd1.raw

# Please match the image file size with the emulated SSD size in vssd1.conf file
[[ ! -e $IMGDIR/vssd1.raw ]] && ./qemu-img create -f raw $IMGDIR/vssd1.raw 1G
[[ ! -e $NVMEIMGF ]] && ./qemu-img create -f raw $NVMEIMGF $NVMEIMGSZ

# huge page related settings
#echo 25000 | sudo tee /proc/sys/vm/nr_hugepages

#[[ ! -d /dev/hugepages2M ]] && sudo mkdir /dev/hugepages2M && sudo mount -t hugetlbfs none /dev/hugepages2M -o pagesize=2M


# Useful options you may want to further try:
#-object iothread,id=iothread0 \
#-device virtio-blk-pci,iothread=iothread0,drive=id0 \
#-nographic \
@@ -43,20 +51,20 @@ sudo rm -rf $IMGDIR/vssd1.raw


sudo x86_64-softmmu/qemu-system-x86_64 \
-name "nvme-FEMU-test" \
-name "FEMU-blackbox-SSD" \
-enable-kvm \
-cpu host \
-smp 4 \
-m 4G \
-device virtio-scsi-pci,id=scsi0 \
-device scsi-hd,drive=hd0 \
-drive file=$IMGDIR/u14s.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd0 \
-drive file=$IMGDIR/vssd1.raw,if=none,aio=threads,format=raw,id=id0 \
-drive file=$OSIMGF,if=none,aio=native,cache=none,format=qcow2,id=hd0 \
-drive file=$NVMEIMGF,if=none,aio=threads,format=raw,id=id0 \
-device nvme,drive=id0,serial=serial0,id=nvme0 \
-net user,hostfwd=tcp::8080-:22 \
-net nic,model=virtio \
-nographic \
-qmp unix:./qmp-sock,server,nowait | tee /media/log
-qmp unix:./qmp-sock,server,nowait
#-object iothread,id=iothread0 \
#-display none \
#-nographic \
@@ -66,6 +74,10 @@ sudo x86_64-softmmu/qemu-system-x86_64 \

#sleep 10

#
# Please manually run the following commands for better FEMU performance/accuracy
#

#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
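
Inside the guest, a rough read-only sanity check of the emulated drive (a sketch; /dev/nvme0n1 is an assumed device name, verify it with lsblk first):

sudo dd if=/dev/nvme0n1 of=/dev/null bs=4k count=100000 iflag=direct   # ~400 MB of 4 KB direct reads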
48 changes: 25 additions & 23 deletions femu-scripts/lnvm-run.sh → femu-scripts/run-whitebox.sh
@@ -1,28 +1,30 @@
#!/bin/bash
# Huaicheng Li <[email protected]>
# Run VM with lightnvm support
# Run VM with lightnvm support: FEMU as a whitebox SSD (OpenChannel-SSD)

# image directory
IMGDIR=$HOME/images

is_mounted=$(mount | grep "/mnt/tmpfs")

if [[ $is_mounted == "" ]]; then
sudo mkdir -p /mnt/tmpfs
# huge=always
sudo mount -t tmpfs -o size=4G,huge=always tmpfs /mnt/tmpfs
# virtual machine disk image
OSIMGF=$IMGDIR/u14s.qcow2
# virtual NVMe disk image
NVMEIMGF=$IMGDIR/vssd1.raw
# virtual NVMe disk size: 1GB
NVMEIMGSZ=1G

# every time we create a new SSD image file
#sudo rm -rf $IMGDIR/vssd1.raw

if [[ ! -e "$OSIMGF" ]]; then
echo ""
echo "VM disk image couldn't be found ..."
echo "Please prepare a usable VM image and place it as $OSIMGF"
echo "Once VM disk image is ready, please rerun this script again"
echo ""
exit
fi


# every time we run a new SSD
sudo rm -rf /mnt/tmpfs/test1.raw

[[ ! -e /mnt/tmpfs/test1.raw ]] && ./qemu-img create -f raw /mnt/tmpfs/test1.raw 4G

# huge page related settings
#echo 25000 | sudo tee /proc/sys/vm/nr_hugepages

[[ ! -d /dev/hugepages2M ]] && sudo mkdir /dev/hugepages2M && sudo mount -t hugetlbfs none /dev/hugepages2M -o pagesize=2M

# Please match the image file size with the emulated SSD size in vssd1.conf file
[[ ! -e $NVMEIMGF ]] && ./qemu-img create -f raw $NVMEIMGF $NVMEIMGSZ

#-object iothread,id=iothread0 \
#-device virtio-blk-pci,iothread=iothread0,drive=id0 \
@@ -42,20 +44,20 @@ sudo rm -rf /mnt/tmpfs/test1.raw


sudo x86_64-softmmu/qemu-system-x86_64 \
-name "nvme-FEMU-whitebox-test" \
-name "FEMU-whitebox-SSD" \
-enable-kvm \
-cpu host \
-smp 4 \
-m 4G \
-device virtio-scsi-pci,id=scsi0 \
-device scsi-hd,drive=hd0 \
-drive file=$IMGDIR/u14s.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd0 \
-drive file=/mnt/tmpfs/test1.raw,if=none,aio=threads,format=raw,id=id0 \
-drive file=$OSIMGF,if=none,aio=native,cache=none,format=qcow2,id=hd0 \
-drive file=$NVMEIMGF,if=none,aio=threads,format=raw,id=id0 \
-device nvme,drive=id0,serial=serial0,id=nvme0,namespaces=1,lver=1,lmetasize=16,ll2pmode=0,nlbaf=5,lba_index=3,mdts=10,lnum_ch=2,lnum_lun=8,lnum_pln=2,lsec_size=4096,lsecs_per_pg=4,lpgs_per_blk=512,ldebug=0,femu_mode=0 \
-net user,hostfwd=tcp::8080-:22 \
-net nic,model=virtio \
-nographic \
-qmp unix:./qmp-sock,server,nowait | tee /media/log
-qmp unix:./qmp-sock,server,nowait
#-object iothread,id=iothread0 \
#-display none \
#-nographic \
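
Inside the guest, a quick check that the emulated OpenChannel device was detected (a sketch; it assumes the guest kernel has LightNVM support, which the lver=1 device parameters above target):

dmesg | grep -i -e lightnvm -e nvme       # driver probe messages
ls /dev/nvme*                             # the emulated controller and namespace nodes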
