import tensorflow as tf
#############
# Data preparation: load MNIST and flatten the images for a dense softmax model.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Flatten 28x28 images to 784-dim vectors; -1 infers the sample count so the
# code no longer hard-codes 60000/10000. Scale uint8 pixels (0-255) to
# float32 in [0, 1] — unnormalized inputs train poorly with random-normal
# initial weights and Adam at lr=0.01.
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
# One-hot encode the integer class labels (10 classes).
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
###############
# Model definition (TensorFlow 1.x static graph): single dense softmax layer.
# Placeholders: entry points for feeding batches into the graph at run time.
# None in the batch dimension allows any batch size.
X = tf.placeholder(tf.float32, shape=[None, 784])
Y = tf.placeholder(tf.float32, shape=[None, 10])
# Trainable parameters: weight matrix and bias, randomly initialized.
W = tf.Variable(tf.random_normal(shape=[784,10]))
B = tf.Variable(tf.random_normal(shape=[10]))
# Logits: unnormalized class scores, XW + B.
logit = tf.matmul(X,W)+B
# Predicted class probabilities.
pred = tf.nn.softmax(logit)
# Loss: softmax cross-entropy computed on the raw logits (not on `pred` —
# that would apply softmax twice), then reduced to a scalar batch mean.
loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits = logit, labels=Y)
loss = tf.reduce_mean(loss)
# Optimizer: Adam with learning rate 0.01 minimizing the mean loss.
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
# Accuracy: fraction of samples whose argmax prediction matches the
# argmax of the one-hot label.
acc = tf.equal(tf.argmax(pred, axis=1), tf.argmax(Y, axis=1))
acc = tf.reduce_mean(tf.cast(acc, tf.float32))
###################
# Model training.
# Create the session and initialize all trainable variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Mini-batch training. NOTE(review): the original paste had lost all loop
# indentation (an IndentationError); the loop structure is restored here.
epochs = 20
batch = 512
# Number of full batches per epoch is loop-invariant — compute it once.
# Any remainder samples beyond the last full batch are skipped each epoch.
n = len(x_train) // batch
for e in range(epochs):
    for b in range(n):
        # Slice out the b-th mini-batch and run one optimizer step on it.
        x = x_train[b * batch : (b + 1) * batch]
        y = y_train[b * batch : (b + 1) * batch]
        sess.run(optimizer, feed_dict={X: x, Y: y})
############
# Model evaluation: measure accuracy on the held-out test set.
test_accuracy = sess.run(acc, feed_dict={X: x_test, Y: y_test})
print("모델평가")
print(test_accuracy)
# --- blog-footer residue from the original source (not code); kept as a
# --- comment so the file remains valid Python:
# 'Develop > AI' 카테고리의 다른 글
# Scikit-learn LinearRegression (0) | 2019.07.15 |
# ---|---|
# placeholder / feed_dick (0) | 2019.07.14 |