feedable_iterator

A feedable Iterator is used in four steps (a minimal sketch follows the list):

Create the Dataset(s)
Create the handle and the Iterator (the handle is essentially a placeholder)
Run string_handle() on each concrete iterator to get its handle
Feed the handle into the graph through feed_dict
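
Before walking through the full cats-vs-dogs example below, here is a minimal, self-contained sketch of the pattern (TensorFlow 1.x; the two tiny range datasets are made up purely for illustration):

import tensorflow as tf

# Step 1: create the datasets (tiny synthetic ones here)
train_ds = tf.data.Dataset.range(10).batch(2)
test_ds = tf.data.Dataset.range(100, 105).batch(2)

# Step 2: create the handle placeholder and a generic iterator bound to it
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_ds.output_types, train_ds.output_shapes)
next_batch = iterator.get_next()

train_iter = train_ds.make_one_shot_iterator()
test_iter = test_ds.make_one_shot_iterator()

with tf.Session() as sess:
    # Step 3: run string_handle() once per iterator to obtain feedable handles
    train_handle = sess.run(train_iter.string_handle())
    test_handle = sess.run(test_iter.string_handle())
    # Step 4: feed whichever handle the graph should read from
    print(sess.run(next_batch, feed_dict={handle: train_handle}))  # a batch from train_ds
    print(sess.run(next_batch, feed_dict={handle: test_handle}))   # a batch from test_ds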

import tensorflow as tf
import glob
import numpy as np
# Use glob to collect all training image file names
image_filenames = glob.glob(r"E:/日月光华tensorflow/日月光华-tensorflow资料/数据集/dc/train/*.jpg")

# Shuffle the file names
image_filenames = np.random.permutation(image_filenames)

# Derive each label from the file name: cat = 1, non-cat = 0
# (glob on Windows joins the base name with '\\', so split('\\')[1] is the file name)
train_labels = list(map(lambda x: [float(x.split('\\')[1].split('.')[0] == 'cat')], image_filenames))
train_labels = tf.data.Dataset.from_tensor_slices(train_labels)

# Create the dataset of file names with from_tensor_slices
train_dataset = tf.data.Dataset.from_tensor_slices(image_filenames)

train_labels
<DatasetV1Adapter shapes: (1,), types: tf.float32>
# Define a preprocessing function to map over the dataset
def _pre_read(img_filename):
    image = tf.read_file(img_filename)                  # read the raw file contents
    image = tf.image.decode_jpeg(image, channels=3)     # decode JPEG to an RGB tensor
    image = tf.image.rgb_to_grayscale(image)            # convert to a single grayscale channel
    image = tf.image.resize_images(image, (200, 200))   # resize to 200x200
    image = tf.reshape(image, [200, 200, 1])            # pin the static shape
    image = tf.image.per_image_standardization(image)   # zero mean, unit variance per image
    return image
# Apply the preprocessing to train_dataset
train_dataset = train_dataset.map(_pre_read)  # map applies _pre_read to every file name
train_label_dataset = tf.data.Dataset.zip((train_dataset, train_labels))   # zip pairs images with their labels
train_label_dataset = train_label_dataset.shuffle(90)   # shuffle randomizes order; the argument (90) is the shuffle buffer size
train_label_dataset = train_label_dataset.repeat(5)  # repeat for 5 epochs
train_label_dataset = train_label_dataset.batch(20)  # batches of 20
train_label_dataset
WARNING:tensorflow:From D:\Code\Miniconda3\envs\tensorflow1.13-gpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py:3632: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From D:\Code\Miniconda3\envs\tensorflow1.13-gpu\lib\site-packages\tensorflow\python\ops\image_ops_impl.py:1241: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Deprecated in favor of operator or tf.math.divide.

<DatasetV1Adapter shapes: ((?, 200, 200, 1), (?, 1)), types: (tf.float32, tf.float32)>
# Test data
# Use glob to collect all test image file names
image_filenames_test = glob.glob(r"E:/日月光华tensorflow/日月光华-tensorflow资料/数据集/dc/test/*.jpg")
test_dataset = tf.data.Dataset.from_tensor_slices(image_filenames_test)
test_dataset = test_dataset.map(_pre_read)  # same preprocessing as the training set
test_labels = tf.data.Dataset.from_tensor_slices(
    np.random.randint(0, 1, len(image_filenames_test)).astype(np.float32).reshape(-1, 1))  # placeholder labels (randint(0, 1) always yields 0); in a real scenario read the true labels
test_label_dataset = tf.data.Dataset.zip((test_dataset, test_labels))   # zip pairs images with labels
# The test set is not shuffled
test_label_dataset = test_label_dataset.repeat(1)  # a single pass (1 epoch)
test_label_dataset = test_label_dataset.batch(20)  # batches of 20
test_label_dataset
<DatasetV1Adapter shapes: ((?, 200, 200, 1), (?, 1)), types: (tf.float32, tf.float32)>
# Define the handle and the iterator
handle = tf.placeholder(tf.string, shape=[])  # shape=[] means a scalar; the handle is just a placeholder
iterator = tf.data.Iterator.from_string_handle(handle, test_label_dataset.output_types, test_label_dataset.output_shapes)
next_element = iterator.get_next()   # fetch the next (image, label) batch
# Prepare two iterators whose string handles will be fed into the handle placeholder
training_iterator = train_label_dataset.make_one_shot_iterator()
test_iterator = test_label_dataset.make_one_shot_iterator()
# Define the network
conv2d_1 = tf.contrib.layers.convolution2d(
    next_element[0],
    num_outputs=32,  # output channels
    weights_initializer=tf.truncated_normal_initializer(stddev=0.001),  # small stddev keeps initial weights near 0, since the inputs were standardized
    kernel_size=(5, 5),  # convolution kernel size
    activation_fn=tf.nn.relu,
    stride=(1, 1),  # stride in height and width; convolution layers usually use stride 1
    padding="SAME",
    trainable=True)  # weights are trainable

# Convolution plus pooling gradually shrinks height/width while the channel depth grows (feature extraction)

pool_1 = tf.nn.max_pool(conv2d_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")  # stride 2 halves height and width

conv2d_2 = tf.contrib.layers.convolution2d(
    pool_1,
    num_outputs=32,  # output channels
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),  # small stddev keeps initial weights near 0
    kernel_size=(5, 5),  # convolution kernel size
    activation_fn=tf.nn.relu,
    stride=(1, 1),  # convolution stride 1
    padding="SAME",
    trainable=True)

pool_2 = tf.nn.max_pool(conv2d_2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")  # stride 2 halves height and width

conv2d_3 = tf.contrib.layers.convolution2d(
    pool_2,
    num_outputs=64,  # output channels
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),  # small stddev keeps initial weights near 0
    kernel_size=(5, 5),  # convolution kernel size
    activation_fn=tf.nn.relu,
    stride=(1, 1),  # convolution stride 1
    padding="SAME",
    trainable=True)

pool_3 = tf.nn.max_pool(conv2d_3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")  # stride 2 halves height and width
pool_3.get_shape()
WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
  * https://github.com/tensorflow/addons
If you depend on functionality not listed there, please file an issue.

TensorShape([Dimension(None), Dimension(25), Dimension(25), Dimension(64)])
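
Why 25×25: with SAME padding each stride-2 max pool roughly halves the spatial size, so the 200×200 input shrinks as 200 → 100 → 50 → 25 over the three pooling layers, and the last convolution has 64 channels. Flattening pool_3 for the fully connected layer therefore needs 25 * 25 * 64 = 40000 features per example, which is exactly the reshape used below.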
# Fully connected layers; flatten the pooled feature map first
pool3_flat = tf.reshape(pool_3, [-1, 25*25*64])
fc_1 = tf.contrib.layers.fully_connected(pool3_flat, 1024,
                                         weights_initializer=tf.truncated_normal_initializer(stddev=0.1),  # stddev: standard deviation of the truncated normal initializer
                                         activation_fn=tf.nn.relu)
fc_2 = tf.contrib.layers.fully_connected(fc_1, 192,
                                         weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                         activation_fn=tf.nn.relu)
# Output layer: a single logit per example; the sigmoid turns it into the cat probability
out_wl = tf.Variable(tf.truncated_normal([192, 1]))
out_bl = tf.Variable(tf.truncated_normal([1]))
comb_out = tf.matmul(fc_2, out_wl) + out_bl
pred = tf.sigmoid(comb_out)
pred.get_shape()
TensorShape([Dimension(None), Dimension(1)])
next_element[1]
<tf.Tensor 'IteratorGetNext:1' shape=(?, 1) dtype=float32>
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=next_element[1], logits=comb_out))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
predicted = tf.cast(pred>0.5,tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, next_element[1]),tf.float32))
step = 0
# Saver for checkpoints
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    training_handle = sess.run(training_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())
    # Training
    for i in range(200):
        sess.run(train_step, feed_dict={handle: training_handle})
        if step % 100 == 0:
            res = sess.run([loss, accuracy], feed_dict={handle: training_handle})
            print(step, res)
            saver.save(sess, './feedable_iterator', global_step=step)
        step += 1

    # Evaluation on the test set
    accuracy_rate = []
    while True:
        try:
            acc_rate = sess.run(accuracy, feed_dict={handle: test_handle})
            accuracy_rate.append(acc_rate)
        except tf.errors.OutOfRangeError:
            print(np.mean(accuracy_rate))
            break  # the one-shot test iterator is exhausted; leave the loop

0 [0.8071561, 0.3]
100 [0.629634, 0.7]
0.71



Note: in this run the evaluation loop called tf.reset_default_graph() inside the except branch instead of breaking, which produced two errors. First the expected end-of-data signal from the one-shot test iterator:

OutOfRangeError: End of sequence
	 [[node IteratorGetNext]]

Then, while that exception was being handled, tf.reset_default_graph() raised:

AssertionError: Do not use tf.reset_default_graph() to clear nested graphs. If you need a cleared graph, exit the nesting and create a new graph.

The OutOfRangeError is normal; it simply means the dataset has been consumed. The AssertionError occurs because the default graph cannot be reset while a session built on it is still active inside the with block. Catching OutOfRangeError and breaking out of the loop is enough to end the evaluation cleanly.
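
If a clean graph is actually needed after evaluation, the error message points at the fix: leave the session first, or build each experiment in its own graph. A minimal sketch (illustration only, not from the original notebook):

import tensorflow as tf

# Option 1: once the `with tf.Session() as sess:` block has exited,
# the session is closed and the default graph can be reset safely.
tf.reset_default_graph()

# Option 2: keep each experiment in its own explicit graph.
graph = tf.Graph()
with graph.as_default():
    dataset = tf.data.Dataset.range(4)  # build datasets, iterators and the model here
    next_item = dataset.make_one_shot_iterator().get_next()
    with tf.Session(graph=graph) as sess:
        print(sess.run(next_item))  # 0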