[AI] TensorFlow: A Simple Neural Network Structure

Terminology

  • Arrays are described by two terms: rank and shape.
  • Rank is the number of dimensions of an array; shape describes how the array is laid out, i.e., how many elements it holds along each dimension.
t = np.array([0., 1., 2., 3., 4., 5., 6.])
  • In the example code above, the array t has rank 1 and shape (7,), as the check below confirms.
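A quick way to confirm rank and shape (a minimal sketch using NumPy's ndim/shape attributes and tf.rank, all standard APIs):

import numpy as np
import tensorflow as tf

t = np.array([0., 1., 2., 3., 4., 5., 6.])
print(t.ndim)    # rank: 1
print(t.shape)   # shape: (7,)

m = tf.constant([[1., 2., 3.], [4., 5., 6.]])
print(tf.rank(m).numpy())  # rank: 2
print(m.shape)             # shape: (2, 3)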

Neuron

  • Input -> Computation -> Activation function -> Output
import numpy as np
import tensorflow as tf

def sigmoid(x):
    # Sigmoid activation: squashes any real value into (0, 1)
    return 1 / (1 + np.exp(-x))

def Neuron(x, W, bias=0):
    # Element-wise weighted input plus bias, passed through the activation
    z = x * W + bias
    return sigmoid(z)

# Random input and weights drawn from a normal distribution (mean 0, stddev 1)
x = tf.random.normal((1, 2), 0, 1)
W = tf.random.normal((1, 2), 0, 1)

print('x.shape:', x.shape)
print('W.shape:', W.shape)

print(x)
print(W)

print(Neuron(x, W))
x.shape: (1, 2)
W.shape: (1, 2)
tf.Tensor([[ 0.5970093 -0.2332151]], shape=(1, 2), dtype=float32)
tf.Tensor([[-0.9255325 -0.0097003]], shape=(1, 2), dtype=float32)
[[0.36527264 0.5005656 ]]
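Note that x * W above is element-wise, so Neuron returns two values, one per input, rather than a single neuron output. A conventional neuron reduces the weighted inputs to one pre-activation with a dot product; a minimal sketch of that variant (neuron_dot is my naming, not from the original):

import numpy as np
import tensorflow as tf

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def neuron_dot(x, W, bias=0):
    # Sum of element-wise products collapses the inputs into one value
    z = tf.reduce_sum(x * W) + bias
    return sigmoid(z)

x = tf.random.normal((1, 2), 0, 1)
W = tf.random.normal((1, 2), 0, 1)
print(neuron_dot(x, W))  # a single scalar in (0, 1)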

Perceptron Learning Algorithm (Weight Update)

  • w(next step) = w + η(y − ŷ)x (see the worked step after this list)
  • w : weight
  • η : learning rate
  • y : true label
  • ŷ : predicted label
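A single update step worked by hand (illustrative values, not from the training run below):

import math

x, y = 1.0, 0.0      # input and true label
w, eta = 0.5, 0.1    # initial weight and learning rate
y_hat = 1 / (1 + math.exp(-(x * w)))   # prediction: sigmoid(0.5) ≈ 0.622
w = w + eta * (y - y_hat) * x          # 0.5 + 0.1 * (0 - 0.622) ≈ 0.438
print(w)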
import numpy as np
import tensorflow as tf

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def Neuron(x, W, bias=0):
    z = x * W + bias
    return sigmoid(z)

def Neuron2(x, W, bias=0):
    # Dot product of input and weights (W is transposed to align shapes)
    z = tf.matmul(x, W, transpose_b=True) + bias
    return sigmoid(z)

# Scalar case: one input, target label 0
x = 1
y = 0
W = tf.random.normal([1], 0, 1)

print(Neuron(x, W))
print('y:', y)

for index in range(1000):
    output = Neuron(x, W)
    error = y - output
    W = W + x * 0.1 * error   # perceptron update: w <- w + eta * error * x

    if index % 100 == 99:
        print("{}\t{}\t{}".format(index + 1, error, output))

# Vector case: three inputs, target label 1
x2 = tf.random.normal((1, 3), 0, 1)
y2 = tf.ones(1)
W2 = tf.random.normal((1, 3), 0, 1)

print(Neuron2(x2, W2))
print('y2:', y2)

for i in range(1000):
    output = Neuron2(x2, W2)
    error = y2 - output
    W2 = W2 + x2 * 0.1 * error

    if i % 100 == 99:
        print("{}\t{}\t{}".format(i + 1, error, output))
[0.30850583]
y: 0
100     [-0.08585579]   [0.08585579]
200     [-0.04751848]   [0.04751848]
300     [-0.03260403]   [0.03260403]
400     [-0.02475172]   [0.02475172]
500     [-0.0199242]    [0.0199242]
600     [-0.01666195]   [0.01666195]
700     [-0.01431227]   [0.01431227]
800     [-0.01254034]   [0.01254034]
900     [-0.01115692]   [0.01115692]
1000    [-0.01004725]   [0.01004725]
[[0.320976]]
y2: tf.Tensor([1.], shape=(1,), dtype=float32)
100     [[0.03050131]]  [[0.9694987]]
200     [[0.01481593]]  [[0.9851841]]
300     [[0.00975931]]  [[0.9902407]]
400     [[0.00727063]]  [[0.99272937]]
500     [[0.00579131]]  [[0.9942087]]
600     [[0.00481141]]  [[0.9951886]]
700     [[0.00411481]]  [[0.9958852]]
800     [[0.0035941]]   [[0.9964059]]
900     [[0.00319022]]  [[0.9968098]]
1000    [[0.00286794]]  [[0.99713206]]

AND Gate

import numpy as np
import tensorflow as tf

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# AND-gate truth table
X = np.array([[1,1], [1,0], [0,1], [0,0]])
Y = np.array([[1], [0], [0], [0]])

W = tf.random.normal([2], 0, 1)   # input weights
b = tf.random.normal([1], 0, 1)   # bias weight
b_x = 1                           # constant bias input

for i in range(2000):
    error_sum = 0

    for j in range(4):
        output = sigmoid(np.sum(X[j] * W) + b_x * b)
        error = Y[j][0] - output
        W = W + X[j] * 0.1 * error
        b = b + b_x * 0.1 * error
        error_sum += error

    if i % 200 == 0:
        print("Epoch {:4d}\tError Sum {}".format(i, error_sum))

print("\nWeights\t: {}".format(W))
print("Bias\t: {}".format(b))

for i in range(4):
    print("X: {} Y: {} Output: {}".format(X[i], Y[i], sigmoid(np.sum(X[i] * W) + b)))
Epoch    0      Error Sum [-0.5211484]
Epoch  200      Error Sum [-0.10944445]
Epoch  400      Error Sum [-0.06540047]
Epoch  600      Error Sum [-0.04651934]
Epoch  800      Error Sum [-0.03599498]
Epoch 1000      Error Sum [-0.02929975]
Epoch 1200      Error Sum [-0.02467646]
Epoch 1400      Error Sum [-0.02129728]
Epoch 1600      Error Sum [-0.01872229]
Epoch 1800      Error Sum [-0.01669659]

Weights : [6.974747  6.9778795]
Bias    : [-11.641185]
X: [1 1] Y: [1] Output: [0.9098202]
X: [1 0] Y: [0] Output: [0.00931807]
X: [0 1] Y: [0] Output: [0.00934703]
X: [0 0] Y: [0] Output: [8.796174e-06]
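As a sanity check, plugging the printed weights and bias back into the sigmoid reproduces the outputs above (values copied from this run):

import numpy as np

W = np.array([6.974747, 6.9778795])  # weights printed above
b = -11.641185                       # bias printed above

for x in [[1, 1], [1, 0], [0, 1], [0, 0]]:
    z = np.dot(x, W) + b
    print(x, 1 / (1 + np.exp(-z)))   # [1, 1] -> ~0.91, all others near 0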

OR Gate

import numpy as np
import tensorflow as tf

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# OR-gate truth table
X = np.array([[1,1], [1,0], [0,1], [0,0]])
Y = np.array([[1], [1], [1], [0]])

W = tf.random.normal([2], 0, 1)
b = tf.random.normal([1], 0, 1)
b_x = 1

for i in range(2000):
    error_sum = 0

    for j in range(4):
        output = sigmoid(np.sum(X[j] * W) + b_x * b)
        error = Y[j][0] - output
        W = W + X[j] * 0.1 * error
        b = b + b_x * 0.1 * error
        error_sum += error

    if i % 200 == 0:
        print("Epoch {:4d}\tError Sum {}".format(i, error_sum))

print("\nWeights\t: {}".format(W))
print("Bias\t: {}".format(b))

for i in range(4):
    print("X: {} Y: {} Output: {}".format(X[i], Y[i], sigmoid(np.sum(X[i] * W) + b)))
Epoch    0      Error Sum [1.3766354]
Epoch  200      Error Sum [-0.04944375]
Epoch  400      Error Sum [-0.02580962]
Epoch  600      Error Sum [-0.01736318]
Epoch  800      Error Sum [-0.01303778]
Epoch 1000      Error Sum [-0.01041865]
Epoch 1200      Error Sum [-0.00866691]
Epoch 1400      Error Sum [-0.00741445]
Epoch 1600      Error Sum [-0.00647528]
Epoch 1800      Error Sum [-0.00574668]

Weights : [8.210555  8.2069435]
Bias    : [-4.6388216]
X: [1 1] Y: [1] Output: [0.9999924]
X: [1 0] Y: [1] Output: [0.9726614]
X: [0 1] Y: [1] Output: [0.9725652]
X: [0 0] Y: [0] Output: [0.00957649]

XOR Gate

  • A single-layer perceptron cannot learn XOR because it is not linearly separable; the model below adds a hidden layer to solve it.

import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

np.random.seed(111)

# XOR truth table: not linearly separable
X = np.array([[1,1], [1,0], [0,1], [0,0]])
Y = np.array([[0], [1], [1], [0]])

# Two-layer network: a hidden layer of 2 sigmoid units, then 1 sigmoid output unit
model = Sequential([Dense(units=2, activation='sigmoid', input_shape=(2,)),
                    Dense(units=1, activation='sigmoid')])

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')

model.summary()

history = model.fit(X, Y, epochs=2000, batch_size=1, verbose=1)

print(model.predict(X))
[[0.15112422]
 [0.8442709 ]
 [0.801257  ]
 [0.17558895]]
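To see what the two layers learned, the trained parameters can be listed with model.get_weights(), a standard Keras method:

for weight in model.get_weights():
    # prints, in order: hidden kernel (2x2), hidden bias (2,), output kernel (2x1), output bias (1,)
    print(weight)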
