9_3_Distributed.py

This post presents distributed training on the MNIST dataset with TensorFlow across multiple worker nodes. The model parameters are placed on parameter server (ps) tasks while the ops are defined on and executed by worker tasks, giving data-parallel training with model replicas. It walks through how to set up the cluster, start the servers, and coordinate the tasks so that training of this simple model is sped up.
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Distributed MNIST training and validation, with model replicas.

A simple softmax model with one hidden layer is defined. The parameters
(weights and biases) are located on one parameter server (ps) task by
default, while the ops are defined on the worker nodes. The TF sessions also
run on the worker nodes.
Multiple invocations of this script can be done in parallel, with different
values for --task_index. There should be exactly one invocation with
--task_index=0, which will create a master session that carries out variable
initialization. The other, non-master, sessions will wait for the master
session to finish the initialization before proceeding to the training stage.

The coordination between the multiple worker invocations occurs due to
the definition of the parameters on the same ps devices. The parameter updates
from one worker are visible to all other workers. As such, the workers can
perform forward computation and gradient calculation in parallel, which
should lead to increased training speed for the simple model.
"""


#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function

import math
#import sys
import tempfile
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data",
                    "Directory for storing mnist data")
#flags.DEFINE_boolean("download_only", False,
#                     "Only perform downloading of data; Do not proceed to "
#                     "session preparation, model definition or training")
flags.DEFINE_integer("task_index", None,
                     "Worker task index, should be >= 0. task_index=0 is "
                     "the master worker task that performs the variable "
                     "initialization")
#flags.DEFINE_integer("num_gpus", 2,
#                     "Total number of gpus for each machine."
#                     "If you don't use GPU, please set it to '0'")
flags.DEFINE_integer("replicas_to_aggregate", None,
                     "Number of replicas to aggregate before a parameter "
                     "update is applied (for sync_replicas mode only; "
                     "default: num_workers)")
flags.DEFINE_integer("hidden_units", 100,
                     "Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 1000000,
                     "Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_boolean("sync_replicas", False,
                     "Use the sync_replicas (synchronized replicas) mode, "
                     "wherein the parameter updates from workers are aggregated "
                     "before applied to avoid stale gradients")
#flags.DEFINE_boolean(
#    "existing_servers", False, "Whether servers already exists. If True, "
#    "will use the worker hosts via their GRPC URLs (one client process "
#    "per worker host). Otherwise, will create an in-process TensorFlow "
#    "server.")
flags.DEFINE_string("ps_hosts", "192.168.233.201:2222",
                    "Comma-separated list of hostname:port pairs")
flags.DEFINE_string("worker_hosts", "192.168.233.202:2223,192.168.233.203:2224",
                    "Comma-separated list of hostname:port pairs")
flags.DEFINE_string("job_name", None, "job name: worker or ps")

FLAGS = flags.FLAGS
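
# With the default flag values above, a typical launch is one process per
# cluster task, each started on its own host (the addresses are only the
# placeholder defaults); a sketch:
#
#   python 9_3_Distributed.py --job_name=ps --task_index=0
#   python 9_3_Distributed.py --job_name=worker --task_index=0
#   python 9_3_Distributed.py --job_name=worker --task_index=1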


IMAGE_PIXELS = 28


def main(unused_argv):
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
#  if FLAGS.download_only:
#    sys.exit(0)

  if FLAGS.job_name is None or FLAGS.job_name == "":
    raise ValueError("Must specify an explicit `job_name`")
  if FLAGS.task_index is None or FLAGS.task_index == "":
    raise ValueError("Must specify an explicit `task_index`")

  print("job name = %s" % FLAGS.job_name)
  print("task index = %d" % FLAGS.task_index)

  # Construct the cluster and start the server.
  ps_spec = FLAGS.ps_hosts.split(",")
  worker_spec = FLAGS.worker_hosts.split(",")

  # Get the number of workers.
  num_workers = len(worker_spec)
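  # With the default flags, the cluster below consists of one ps task
  # (192.168.233.201:2222) and two worker tasks (192.168.233.202:2223 and
  # 192.168.233.203:2224). Every process in the cluster must construct the
  # same ClusterSpec.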

  cluster = tf.train.ClusterSpec({
      "ps": ps_spec,
      "worker": worker_spec})

  #if not FLAGS.existing_servers:
    # Not using existing servers. Create an in-process server.
  server = tf.train.Server(
      cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
  if FLAGS.job_name == "ps":
    server.join()

  is_chief = (FLAGS.task_index == 0)
  
#  if FLAGS.num_gpus > 0:
#    if FLAGS.num_gpus < num_workers:
#      raise ValueError("number of gpus is less than number of workers")
#    # Avoid gpu allocation conflict: now allocate task_num -> #gpu 
#    # for each worker in the corresponding machine
#    gpu = (FLAGS.task_index % FLAGS.num_gpus)
#    worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
#  elif FLAGS.num_gpus == 0:
#    # Just allocate the CPU to worker server
#    cpu = 0
#    worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
#  # The device setter will automatically place Variables ops on separate
#  # parameter servers (ps). The non-Variable ops will be placed on the workers.
#  # The ps use CPU and workers use corresponding GPU
  
  worker_device = "/job:worker/task:%d/gpu:0" % FLAGS.task_index
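  # Non-variable ops are pinned to gpu:0 of this worker; allow_soft_placement
  # in the session config below lets TensorFlow fall back to CPU if no GPU is
  # available.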
  with tf.device(
      tf.train.replica_device_setter(
          worker_device=worker_device,
          ps_device="/job:ps/cpu:0",
          cluster=cluster)):
    global_step = tf.Variable(0, name="global_step", trainable=False)
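    # Like the model variables below, global_step is placed on the ps device
    # by the replica_device_setter, so all workers share one step counter.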

    # Variables of the hidden layer
    hid_w = tf.Variable(
        tf.truncated_normal(
            [IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
            stddev=1.0 / IMAGE_PIXELS),
        name="hid_w")
    hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")

    # Variables of the softmax layer
    sm_w = tf.Variable(
        tf.truncated_normal(
            [FLAGS.hidden_units, 10],
            stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
        name="sm_w")
    sm_b = tf.Variable(tf.zeros([10]), name="sm_b")

    # Ops: located on the worker specified with FLAGS.task_index
    x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
    y_ = tf.placeholder(tf.float32, [None, 10])

    hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
    hid = tf.nn.relu(hid_lin)

    y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
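    # clip_by_value keeps the softmax output away from zero so that tf.log
    # never sees an exact zero (which would yield -inf and NaN losses).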

    opt = tf.train.AdamOptimizer(FLAGS.learning_rate)

    if FLAGS.sync_replicas:
      if FLAGS.replicas_to_aggregate is None:
        replicas_to_aggregate = num_workers
      else:
        replicas_to_aggregate = FLAGS.replicas_to_aggregate

      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=replicas_to_aggregate,
          total_num_replicas=num_workers,
          name="mnist_sync_replicas")
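      # The wrapper accumulates gradients from `replicas_to_aggregate`
      # replicas and applies them as a single update, so workers do not
      # apply stale gradients to the shared parameters.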

    train_step = opt.minimize(cross_entropy, global_step=global_step)

    if FLAGS.sync_replicas and is_chief:
      # Initial token and chief queue runners required by the sync_replicas mode
      chief_queue_runner = opt.get_chief_queue_runner()
      init_tokens_op = opt.get_init_tokens_op()

    init_op = tf.global_variables_initializer()
    train_dir = tempfile.mkdtemp()
    sv = tf.train.Supervisor(
        is_chief=is_chief,
        logdir=train_dir,
        init_op=init_op,
        recovery_wait_secs=1,
        global_step=global_step)

    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])

    # The chief worker (task_index==0) session will prepare the session,
    # while the remaining workers will wait for the preparation to complete.
    if is_chief:
      print("Worker %d: Initializing session..." % FLAGS.task_index)
    else:
      print("Worker %d: Waiting for session to be initialized..." %
            FLAGS.task_index)

#    if FLAGS.existing_servers:
#      server_grpc_url = "grpc://" + worker_spec[FLAGS.task_index]
#      print("Using existing server at: %s" % server_grpc_url)
#
#      sess = sv.prepare_or_wait_for_session(server_grpc_url, config=sess_config)
#    else:
    sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)

    print("Worker %d: Session initialization complete." % FLAGS.task_index)

    if FLAGS.sync_replicas and is_chief:
      # Chief worker will start the chief queue runner and call the init op
      print("Starting chief queue runner and running init_tokens_op")
      sv.start_queue_runners(sess, [chief_queue_runner])
      sess.run(init_tokens_op)

    # Perform training
    time_begin = time.time()
    print("Training begins @ %f" % time_begin)

    local_step = 0
    while True:
      # Training feed
      batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
      train_feed = {x: batch_xs, y_: batch_ys}

      _, step = sess.run([train_step, global_step], feed_dict=train_feed)
      local_step += 1
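      # Note: the global step can run ahead of local_step, since every
      # worker's updates increment the shared global_step counter.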

      now = time.time()
      print("%f: Worker %d: training step %d done (global step: %d)" %
            (now, FLAGS.task_index, local_step, step))

      if step >= FLAGS.train_steps:
        break

    time_end = time.time()
    print("Training ends @ %f" % time_end)
    training_time = time_end - time_begin
    print("Training elapsed time: %f s" % training_time)

    # Validation feed
    val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
    val_xent = sess.run(cross_entropy, feed_dict=val_feed)
    print("After %d training step(s), validation cross entropy = %g" %
          (FLAGS.train_steps, val_xent))
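
    # A hypothetical extension (not part of the original script): validation
    # accuracy could be reported alongside the cross entropy. The ops would
    # have to be created before the Supervisor finalizes the graph, e.g.:
    #   correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    #   accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    #   print("Validation accuracy = %g" % sess.run(accuracy, feed_dict=val_feed))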


if __name__ == "__main__":
  tf.app.run()