Solution: on the server, check which type of compute resource is available, activate the matching virtual environment, and then run the corresponding script.
Take scripting the model-export step of the TensorFlow Object Detection API as an example.
The if/else block below checks whether the current host has a GPU: if it does, the script activates the virtual environment with the GPU build of TensorFlow; otherwise it activates the CPU-build environment.
#!/bin/bash
show_usage="args: [--pipeline_config_path, --trained_checkpoint_prefix, --output_directory]"
source /etc/profile
# Pick the conda environment based on whether an NVIDIA GPU is present
lspci | grep -i nvidia &> /dev/null
if [ $? -eq 0 ]; then
    echo "GPU found"
    source activate cvtf
else
    echo "No GPU found"
    source activate cpu_cvtf
fi
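# An alternative check (a sketch, not part of the original script): testing for
# the NVIDIA driver itself also fails cleanly on hosts where a card is present
# but no driver is installed:
#   if command -v nvidia-smi > /dev/null 2>&1 && nvidia-smi > /dev/null 2>&1; then
#       source activate cvtf
#   else
#       source activate cpu_cvtf
#   fi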
INPUT_TYPE=image_tensor
PIPELINE_CONFIG_PATH=$1
TRAINED_CKPT_PREFIX=$2
EXPORT_DIR=$3
python_path=`/usr/bin/which python`
dname=$(dirname "$PWD")
# Path to the Object Detection API's export script; adjust to your checkout
$python_path ${dname}/export_inference_graph.py \
    --input_type=${INPUT_TYPE} \
    --pipeline_config_path=${PIPELINE_CONFIG_PATH} \
    --trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \
    --output_directory=${EXPORT_DIR}
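For reference, a typical invocation passes the three positional arguments in the order listed in show_usage; the script name and paths below are hypothetical:

# Hypothetical example: config, checkpoint prefix, and output directory
bash export_model.sh \
    /path/to/pipeline.config \
    /path/to/model_dir/model.ckpt-20000 \
    /path/to/exported_model

Because the environment is activated inside the script, the same command line works on both GPU and CPU hosts.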