TF Distributed Training
Overview: notes on running multi-machine distributed training with TensorFlow, based primarily on the official guide: https://tensorflow.google.cn/tutorials/distribute/multi_worker_with_keras?hl=en
Environment:
- Hadoop 3.2.1
- TensorFlow 2.1.0
- Python 3.7.6
- Distributed training runs on CPUs (no GPU)
Preparation
After installing the software above, configure the Hadoop-related environment variables before running TensorFlow. The variables are listed below (adjust the paths to your own directories):
# Hadoop installation root
export HADOOP_PREFIX=/home/maqy/hadoop-3.2.1
export HADOOP_HOME=/home/maqy/hadoop-3.2.1
# Native Hadoop libraries (libhdfs.so), required for TensorFlow's HDFS support
export LIB_HDFS=$HADOOP_PREFIX/lib/native
export JAVA_HOME=/usr/lib/LOCALCLUSTER/jdk1.8.0_162
# JVM shared library (libjvm.so) that libhdfs links against
export LIB_JVM=$JAVA_HOME/jre/lib/amd64/server
export LD_LIBRARY_PATH=${LIB_HDFS}:${LIB_JVM}:${LD_LIBRARY_PATH}
# Put the Hadoop jars on the classpath so TensorFlow can talk to HDFS
export CLASSPATH=$(/home/maqy/hadoop-3.2.1/bin/hadoop classpath --glob)
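Before launching training, it is worth checking that TensorFlow can actually reach HDFS with these variables in place. A minimal sanity-check sketch, assuming the NameNode address used later in the code (hdfs://hostname:9000 is a placeholder; replace with your own):

import tensorflow as tf

HDFS_DIR = 'hdfs://hostname:9000/home/maqy'  # placeholder, adjust to your cluster

print(tf.io.gfile.exists(HDFS_DIR))   # True if the HDFS path is reachable
print(tf.io.gfile.listdir(HDFS_DIR))  # directory listing fetched over HDFS

If this raises an error about libhdfs or the classpath, recheck the exports above.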
The download link for the dataset is in the blog post below. After downloading, extract it and upload it to HDFS. (Skipping HDFS also works: just copy the data to the same directory on every machine.)
https://blog.youkuaiyun.com/u013036495/article/details/104803500
The code is as follows. Note: adjust the index in TF_CONFIG, as well as the input and output file locations.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import tensorflow_datasets as tfds
import tensorflow as tf
import os
BUFFER_SIZE = 10000
BATCH_SIZE = 32  # note: unused below; GLOBAL_BATCH_SIZE is what actually gets applied
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'worker': ["10.10.23.112:12345", "10.10.23.110:23456"]
    },
    'task': {'type': 'worker', 'index': 0}  # change index to 1 on the other machine
})
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs so training runs on CPU
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
def make_datasets_unbatched():
    # Scaling MNIST data from (0, 255] to (0., 1.]
    def scale(image, label):
        image = tf.cast(image, tf.float32)
        image /= 255
        return image, label

    datasets, info = tfds.load(name='mnist',
                               data_dir='hdfs://hostname:9000/home/maqy/tensorflow_datasets',
                               with_info=True, as_supervised=True)
    return datasets['train'].map(
        scale, num_parallel_calls=tf.data.experimental.AUTOTUNE).cache().shuffle(BUFFER_SIZE)
def build_and_compile_cnn_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(10)
    ])
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
        metrics=['accuracy'])
    return model
#single_worker_model = build_and_compile_cnn_model()
#single_worker_model.fit(x=train_datasets, epochs=3, steps_per_epoch=5)
NUM_WORKERS = 2
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size. Previously we used 64,
# and now this becomes 128.
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
# Creation of dataset needs to be after MultiWorkerMirroredStrategy object
# is instantiated.
train_datasets = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
# maqy add: shard by individual data elements rather than by input files,
# to avoid the "not enough elements for each shard" error described below.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
train_datasets_no_auto_shard = train_datasets.with_options(options)
#with strategy.scope():
# # Model building/compiling need to be within `strategy.scope()`.
# multi_worker_model = build_and_compile_cnn_model()
# Keras' `model.fit()` trains the model with specified number of epochs and
# number of steps per epoch. Note that the numbers here are for demonstration
# purposes only and may not sufficiently produce a model with good quality.
#multi_worker_model.fit(x=train_datasets_no_auto_shard, epochs=500, steps_per_epoch=5)
# Replace the `filepath` argument with a path in the file system
# accessible by all workers.
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='hdfs://hostname/keras-ckpt')]
with strategy.scope():
    multi_worker_model = build_and_compile_cnn_model()
multi_worker_model.fit(x=train_datasets_no_auto_shard,
                       epochs=3,
                       steps_per_epoch=5,
                       callbacks=callbacks)
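One practical tweak: instead of editing the hardcoded index on each machine, the same script can read it from an environment variable, so identical code is launched everywhere. A minimal sketch that would replace the TF_CONFIG block near the top (the WORKER_INDEX variable name is my own, not from the tutorial):

import json
import os

# Hypothetical convention: export WORKER_INDEX=0 on the first machine and
# WORKER_INDEX=1 on the second before running the script.
worker_index = int(os.environ.get('WORKER_INDEX', '0'))
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'worker': ["10.10.23.112:12345", "10.10.23.110:23456"]
    },
    'task': {'type': 'worker', 'index': worker_index}
})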
If you run the code exactly as in the official tutorial, you may hit the following error:
There aren't enough elements in this dataset for each shard to have at least one element (# elems = 1, # shards = 2)
The small snippet below (already included in the code above) is what fixes this, by switching the auto-shard policy from file-based to element-based sharding:
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
train_datasets_no_auto_shard = train_datasets.with_options(options)
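An alternative, assuming the same TF 2.1 tf.data API used above, is to disable automatic sharding entirely with AutoShardPolicy.OFF. Each worker then iterates over the full dataset, which is acceptable here because the shuffle() call in make_datasets_unbatched randomizes order independently in each process. A minimal sketch:

options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
train_datasets_no_shard = train_datasets.with_options(options)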