import tensorflow as tf
import sklearn.datasets as datasets
import numpy as np

# Load the iris dataset once: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
x_data = iris.data
y_data = iris.target

# Shuffle features and labels with the same seed so they stay aligned.
np.random.seed(168)
np.random.shuffle(x_data)
np.random.seed(168)
np.random.shuffle(y_data)
# Hold out the last 30 samples as the test set.
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]

# Cast features to float32 so they match the dtype of the weights.
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)

# Pack features and labels into batched tf.data pipelines.
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
# Trainable parameters of a single-layer softmax classifier: 4 features -> 3 classes.
w = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, dtype=tf.float32))
b = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, dtype=tf.float32))

lr = 0.1        # learning rate
epochs = 500    # passes over the training set
classes = 3     # number of iris classes

train_loss_result = []  # mean training loss per epoch
accuracy_result = []    # test accuracy per epoch
loss_all = 0            # running sum of batch losses within an epoch
for epoch in range(epochs):
    # Training: one gradient-descent step per batch.
    for x_batch, y_batch in train_data:
        with tf.GradientTape() as tape:
            y = tf.matmul(x_batch, w) + b
            y = tf.nn.softmax(y)
            y_ = tf.one_hot(y_batch, depth=classes)
            loss = tf.reduce_mean(tf.square(y - y_))  # mean squared error
            loss_all += loss.numpy()
        grads = tape.gradient(loss, [w, b])
        w.assign_sub(lr * grads[0])
        b.assign_sub(lr * grads[1])
    # 120 training samples with batch size 32 -> 4 batches per epoch.
    train_loss_result.append(loss_all / 4)
    loss_all = 0
    # Evaluation: measure accuracy on the held-out test set after each epoch.
    total_correct, total_num = 0, 0
    for x_batch, y_batch in test_data:
        y = tf.matmul(x_batch, w) + b
        y = tf.nn.softmax(y)
        pred = tf.argmax(y, axis=1)
        # Match the label dtype so tf.equal does not hit an int32/int64 mismatch.
        pred = tf.cast(pred, dtype=y_batch.dtype)
        correct = tf.cast(tf.equal(pred, y_batch), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        total_correct += int(correct)
        total_num += x_batch.shape[0]
    accuracy = total_correct / total_num
    accuracy_result.append(accuracy)
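
# The two history lists above are collected but never used in this script.
# A minimal visualization sketch, assuming matplotlib is available (the figure
# labels below are illustrative, not part of the original code):
import matplotlib.pyplot as plt

plt.figure()
plt.plot(train_loss_result, label="training loss (MSE)")
plt.plot(accuracy_result, label="test accuracy")
plt.xlabel("epoch")
plt.legend()
plt.show()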
