How to use tf.name_scope to make the TensorBoard graph cleaner

What the graph looks like without the following statement:

tf.name_scope('relu')
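
For reference, the un-scoped version of the function would look roughly like this (a minimal sketch: without the with-block, every op sits at the top level of the graph, and the five calls to relu() produce deduplicated names such as weights, weights_1, weights_2, ..., which makes the graph hard to read):

import tensorflow as tf  # TF 1.x style, matching the example below

def relu(X):
    # No name_scope: all ops land at the top level of the default graph.
    w_shape = (int(X.get_shape()[1]), 1)
    w = tf.Variable(tf.random_normal(w_shape), name="weights")
    b = tf.Variable(0.0, name="bias")
    z = tf.add(tf.matmul(X, w), b, name="z")
    return tf.maximum(z, 0., name="relu")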

Now let's tidy up the TensorBoard output by wrapping the ops in a name scope:




import tensorflow as tf  # TF 1.x API (on TF 2.x, use the compat.v1 layer shown further below)

def relu(X):
    # Everything created inside this block is grouped under one "relu" node in TensorBoard.
    with tf.name_scope('relu'):
        w_shape = (int(X.get_shape()[1]), 1)
        w = tf.Variable(tf.random_normal(w_shape), name="weights")
        b = tf.Variable(0.0, name="bias")
        z = tf.add(tf.matmul(X, w), b, name="z")
        return tf.maximum(z, 0., name="relu")

n_features = 3
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
relus = [relu(X) for _ in range(5)]      # five relu blocks sharing the same input
output = tf.add_n(relus, name="output")  # sum the five outputs

# Write the graph definition so TensorBoard can render it.
file_writer = tf.summary.FileWriter('log/', tf.get_default_graph())
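
To view the result, start TensorBoard on the same log directory and open the Graphs tab. If you are on TensorFlow 2.x, the graph-mode script above should still run through the v1 compatibility layer, roughly as sketched here:

# Shell: point TensorBoard at the directory the FileWriter wrote to.
#   tensorboard --logdir log/

# TF 2.x: run the same script via the compat layer (placeholders and
# FileWriter require graph mode, so eager execution must be disabled).
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()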



With name_scope added, each call to relu() in the loop groups all of the function's internal ops into a single collapsible block in the graph, and each block is distinguished by its own name.
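
A quick way to check the block names without opening TensorBoard is to print the op names of the tensors returned by relu() (assuming the graph from the example above has just been built). In graph mode, tf.name_scope appends a numeric suffix whenever the scope name is already taken:

for r in relus:
    print(r.op.name)
# typically: relu/relu, relu_1/relu, relu_2/relu, relu_3/relu, relu_4/relu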

