From 1f1134544de5679e75632f8a12a82a1d16b89ffe Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 27 Sep 2018 16:45:57 -0700
Subject: [PATCH] gpu: nvgpu: unit: Add script to install unit tests

Add a script to install unit tests on a target jetson board. The
installation consists of copying all the binaries generated by tmake
over to that target board and building a source tree that matches the
nvgpu code.

The reason the systemimage out directory is not used is this is created
by the `image createfs' command during the build process. This script
will work after just a simple `tmm[p]'

Change-Id: I1f2650e666a42c12762ab444159b69ba8fc582f8
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1850545
Reviewed-by: Philip Elcan
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 userspace/install-unit.sh | 110 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100755 userspace/install-unit.sh

diff --git a/userspace/install-unit.sh b/userspace/install-unit.sh
new file mode 100755
index 000000000..2fe3d1b4f
--- /dev/null
+++ b/userspace/install-unit.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+# Install the unit test binaries, built by tmake, onto a jetson running
+# L4T. The only argument is the IP address of the jetson.
+#
+
+usage() {
+	echo "Usage:"
+	echo ""
+	echo "  $ $1 [-hs] [--help] [--install-sshkey] <IP addr>"
+	echo ""
+}
+
+# Copy to target jetson (uses the global $target). Takes a local path
+# and a remote path relative to ubuntu's $HOME.
+#
+# It helps a lot if you set up an authorized key on the target! Otherwise
+# you may have a lot of typing to do...
+jcp() {
+	cmd="rsync -qru $1 $target:$2"
+	echo "> $cmd"
+	$cmd
+}
+
+# Add our public key to the authorized key list on the target host.
+install_ssh_key() {
+	ssh-copy-id -f $target
+	return $?
+}
+
+# Variables which may be set by CLI arguments.
+install_sshkey=
+
+# To start with filter out any non-target arguments. Right now that's only -h
+# and -s.
+positionals=()
+while [[ $# -gt 0 ]]
+do
+	arg="$1"
+
+	case $arg in
+	-h|--help)
+		usage $0
+		exit 0
+		;;
+	-s|--install-sshkey)
+		install_sshkey=yes
+		shift
+		;;
+	*)
+		positionals+=("$1")
+		shift
+		;;
+	esac
+done
+
+set -- "${positionals[@]}"
+
+if [ "x$1" == "x" ]
+then
+	echo "Missing IP address!"
+	usage $0
+	exit 1
+fi
+
+if [ "x$TOP" == "x" ]
+then
+	echo "\$TOP must be set!"
+	exit 1
+fi
+
+target="ubuntu@$1"
+nvgpu_bins=$TOP/out/*/nvidia/kernel/nvgpu
+
+# Install the ssh key if needed.
+if [ "$install_sshkey" == "yes" ]
+then
+	echo "Installing our SSH key"
+	install_ssh_key $target || exit 1
+fi
+
+# Building the necessary directory structure. It may not be present
+# first time this is run.
+ssh $target mkdir -p nvgpu_unit/units
+ssh $target mkdir -p $TOP/kernel
+if [ $? != 0 ]; then
+	echo
+	echo "!! Unable to make $TOP on the target jetson! This directory needs"
+	echo "!! to be present and writable by this script in order for coverage"
+	echo "!! tracking to work."
+	exit 1
+fi
+
+# And copy...
+jcp $nvgpu_bins/userspace-l4t_64/nvgpu_unit nvgpu_unit/nvgpu_unit
+jcp $nvgpu_bins/userspace-l4t_64/libnvgpu_unit-lib.so nvgpu_unit/libnvgpu-unit.so
+jcp $nvgpu_bins/drivers/gpu/nvgpu-l4t_64/libnvgpu-drv.so nvgpu_unit/libnvgpu-drv.so
+jcp $TOP/kernel/nvgpu/userspace/unit.sh nvgpu_unit/unit.sh
+
+for unit_dir in `ls $nvgpu_bins/userspace/units`; do
+	unit=${unit_dir%-l4t_64}
+	jcp $nvgpu_bins/userspace/units/$unit_dir/lib$unit.so nvgpu_unit/units
+done
+
+# Set up the necessary coverage files. Basically what we do is recreate just
+# enough of the source/build output here on the local machine over on the
+# target jetson. This means you may need a fair amount of free space there.
+jcp $nvgpu_bins nvgpu_unit
+jcp $TOP/kernel/nvgpu $TOP/kernel
+jcp $TOP/kernel/nvgpu-next $TOP/kernel