def make_session(num_cpu=None, make_default=False, graph=None):
    """Create a TensorFlow session limited to ``num_cpu`` CPU threads.

    Args:
        num_cpu: Number of threads for TF's inter-/intra-op pools. When
            None, falls back to the ``RCALL_NUM_CPU`` environment variable,
            or the machine's CPU count if that is unset.
        make_default: If True, return a ``tf.InteractiveSession`` (installs
            itself as the default session); otherwise a plain ``tf.Session``.
        graph: Optional graph to launch in the session.

    Returns:
        A configured ``tf.Session`` or ``tf.InteractiveSession``.
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    config = tf.ConfigProto(
        allow_soft_placement=True,
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu,
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
    )
    # Best-fit-with-coalescing allocator; grow GPU memory lazily instead of
    # grabbing the whole fraction up front.
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
# NOTE: nvidia-smi — inspect current GPU utilization.
# On a server with multiple GPUs (e.g. four, numbered 0, 1, 2, 3), writing
# `with tf.device('/gpu:3'):` in the code pins computation to GPU 3, but
# TensorFlow will by default still claim memory on ALL visible GPUs.
# Fix: first run `export CUDA_VISIBLE_DEVICES=3` so that only GPU 3 is
# visible to the process, then use `with tf.device('/gpu:0'):` in the code
# (with a single visible device, '/gpu:0' is actually the server's GPU 3);
# the program will then use only GPU 3.